id (int64, 0 to 190k) | prompt (string, length 21 to 13.4M) | docstring (string, length 1 to 12k, may be null) |
---|---|---|
188,634 | from mmdeploy.core import SYMBOLIC_REWRITER
The provided code snippet includes necessary dependencies for implementing the `deform_conv__default` function. Write a Python function `def deform_conv__default(g, input, offset, weight, stride, padding, dilation, groups, deform_groups, bias=False, im2col_step=32)` to solve the following problem:
Rewrite symbolic function for default backend.
Here is the function:
def deform_conv__default(g,
input,
offset,
weight,
stride,
padding,
dilation,
groups,
deform_groups,
bias=False,
im2col_step=32):
"""Rewrite symbolic function for default backend."""
return g.op(
'mmdeploy::MMCVDeformConv2d',
input,
offset,
weight,
stride_i=stride,
padding_i=[p for pair in zip(padding, padding) for p in pair],
dilation_i=dilation,
groups_i=groups,
deform_groups_i=deform_groups) | Rewrite symbolic function for default backend. |
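The only non-obvious line in the row above is the list comprehension passed to `padding_i`; a minimal sketch (plain Python, values illustrative) of what it produces:
# Sketch: the `padding_i` expansion duplicates each padding value in place.
padding = (1, 2)  # (pad_h, pad_w), illustrative values
expanded = [p for pair in zip(padding, padding) for p in pair]
assert expanded == [1, 1, 2, 2]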
188,635 | from mmdeploy.core import SYMBOLIC_REWRITER
The provided code snippet includes necessary dependencies for implementing the `deform_conv_openvino` function. Write a Python function `def deform_conv_openvino(g, input, offset, weight, stride, padding, dilation, groups, deform_groups, bias=False, im2col_step=32)` to solve the following problem:
Rewrite symbolic function for OpenVINO backend.
Here is the function:
def deform_conv_openvino(g,
input,
offset,
weight,
stride,
padding,
dilation,
groups,
deform_groups,
bias=False,
im2col_step=32):
"""Rewrite symbolic function for OpenVINO backend."""
assert not bias, 'The "bias" parameter should be False.'
assert groups == 1, 'The "groups" parameter should be 1.'
kh, kw = weight.type().sizes()[2:]
domain = 'org.openvinotoolkit'
op_name = 'DeformableConv2D'
return g.op(
f'{domain}::{op_name}',
input,
offset,
weight,
strides_i=stride,
pads_i=[p for pair in zip(padding, padding) for p in pair],
dilations_i=dilation,
groups_i=groups,
deformable_groups_i=deform_groups,
kernel_shape_i=[kh, kw]) | Rewrite symbolic function for OpenVINO backend. |
188,636 | from mmdeploy.core import SYMBOLIC_REWRITER
The provided code snippet includes necessary dependencies for implementing the `ms_deform_attn_default` function. Write a Python function `def ms_deform_attn_default( g, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step=64, )` to solve the following problem:
Rewrite msda symbolic function for all backend.
Here is the function:
def ms_deform_attn_default(
g,
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
im2col_step=64,
):
"""Rewrite msda symbolic function for all backend."""
return g.op(
'mmdeploy::MMCVMultiScaleDeformableAttention',
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
im2col_step_i=im2col_step,
) | Rewrite msda symbolic function for all backend. |
188,637 | import torch
from mmdeploy.core import FUNCTION_REWRITER, SYMBOLIC_REWRITER
from mmdeploy.utils import IR
from mmdeploy.backend.torchscript import get_ops_path, ops_available
assert ops_available(), 'torchscript custom ops is required.'
torch.ops.load_library(get_ops_path())
from torch.nn.modules.utils import _pair
kernel_h, kernel_w = weight.shape[-2:]
with_bias = bias is not None
if not with_bias:
bias = input.new_empty(0)
return torch.ops.mmdeploy.modulated_deform_conv(
input, weight, bias, offset, mask, kernel_h, kernel_w, stride[1],
stride[0], padding[1], padding[0], dilation[1], dilation[0], groups,
deform_groups, with_bias)
The provided code snippet includes necessary dependencies for implementing the `modulated_deform_conv__torchscript` function. Write a Python function `def modulated_deform_conv__torchscript(input, offset, mask, weight, bias, stride, padding, dilation, groups, deform_groups)` to solve the following problem:
rewriter for the custom torchscript mdcn op.
Here is the function:
def modulated_deform_conv__torchscript(input, offset, mask, weight, bias,
stride, padding, dilation, groups,
deform_groups):
"""rewriter for the custom torchscript mdcn op."""
from mmdeploy.backend.torchscript import get_ops_path, ops_available
assert ops_available(), 'torchscript custom ops is required.'
torch.ops.load_library(get_ops_path())
from torch.nn.modules.utils import _pair
kernel_h, kernel_w = weight.shape[-2:]
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
with_bias = bias is not None
if not with_bias:
bias = input.new_empty(0)
return torch.ops.mmdeploy.modulated_deform_conv(
input, weight, bias, offset, mask, kernel_h, kernel_w, stride[1],
stride[0], padding[1], padding[0], dilation[1], dilation[0], groups,
deform_groups, with_bias) | rewriter for the custom torchscript mdcn op. |
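The rewrite above relies on `torch.nn.modules.utils._pair` so that stride/padding/dilation may be given as a scalar or a 2-tuple; a small sketch of that normalization:
from torch.nn.modules.utils import _pair
# _pair turns a scalar into a 2-tuple and leaves 2-tuples unchanged,
# which is why the values can be indexed as [0]/[1] afterwards.
assert _pair(1) == (1, 1)
assert _pair((2, 3)) == (2, 3)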
188,638 | import torch
from mmdeploy.core import FUNCTION_REWRITER, SYMBOLIC_REWRITER
from mmdeploy.utils import IR
from mmdeploy.backend.torchscript import get_ops_path, ops_available
from torch.nn.modules.utils import _pair
The provided code snippet includes necessary dependencies for implementing the `modulated_deform_conv_default` function. Write a Python function `def modulated_deform_conv_default(g, input, offset, mask, weight, bias, stride, padding, dilation, groups, deform_groups)` to solve the following problem:
Rewrite mdcn symbolic function for all backend.
Here is the function:
def modulated_deform_conv_default(g, input, offset, mask, weight, bias, stride,
padding, dilation, groups, deform_groups):
"""Rewrite mdcn symbolic function for all backend."""
input_tensors = [input, offset, mask, weight]
if bias is not None:
input_tensors.append(bias)
return g.op(
'mmdeploy::MMCVModulatedDeformConv2d',
*input_tensors,
stride_i=stride,
padding_i=padding,
dilation_i=dilation,
groups_i=groups,
deform_groups_i=deform_groups) | Rewrite mdcn symbolic function for all backend. |
188,639 | import torch.nn.functional as F
from mmdeploy.core import FUNCTION_REWRITER
from mmcv.ops.point_sample import denormalize
add_dim = False
output = F.grid_sample(
input, denormalize(points), align_corners=align_corners, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
The provided code snippet includes necessary dependencies for implementing the `point_sample__default` function. Write a Python function `def point_sample__default(input, points, align_corners=False, **kwargs)` to solve the following problem:
A wrapper around :func:`grid_sample` to support 3D point_coords tensors Unlike :func:`torch.nn.functional.grid_sample` it assumes point_coords to lie inside ``[0, 1] x [0, 1]`` square. Args: input (torch.Tensor): Feature map, shape (N, C, H, W). points (torch.Tensor): Image based absolute point coordinates (normalized), range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2). align_corners (bool, optional): Whether align_corners. Default: False Returns: torch.Tensor: Features of `point` on `input`, shape (N, C, P) or (N, C, Hgrid, Wgrid).
Here is the function:
def point_sample__default(input, points, align_corners=False, **kwargs):
"""A wrapper around :func:`grid_sample` to support 3D point_coords tensors
Unlike :func:`torch.nn.functional.grid_sample` it assumes point_coords to
lie inside ``[0, 1] x [0, 1]`` square.
Args:
input (torch.Tensor): Feature map, shape (N, C, H, W).
points (torch.Tensor): Image based absolute point coordinates
(normalized), range [0, 1] x [0, 1], shape (N, P, 2) or
(N, Hgrid, Wgrid, 2).
align_corners (bool, optional): Whether align_corners.
Default: False
Returns:
torch.Tensor: Features of `point` on `input`, shape (N, C, P) or
(N, C, Hgrid, Wgrid).
"""
from mmcv.ops.point_sample import denormalize
add_dim = False
if points.dim() == 3:
add_dim = True
points = points.unsqueeze(2)
output = F.grid_sample(
input, denormalize(points), align_corners=align_corners, **kwargs)
if add_dim:
output = output.squeeze(3)
return output | A wrapper around :func:`grid_sample` to support 3D point_coords tensors Unlike :func:`torch.nn.functional.grid_sample` it assumes point_coords to lie inside ``[0, 1] x [0, 1]`` square. Args: input (torch.Tensor): Feature map, shape (N, C, H, W). points (torch.Tensor): Image based absolute point coordinates (normalized), range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2). align_corners (bool, optional): Whether align_corners. Default: False Returns: torch.Tensor: Features of `point` on `input`, shape (N, C, P) or (N, C, Hgrid, Wgrid). |
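A minimal shape-flow sketch of the 3D-points branch above using plain `grid_sample`; `pts * 2 - 1` stands in for mmcv's `denormalize` (assumed here to map [0, 1] coordinates to the [-1, 1] range `grid_sample` expects):
import torch
import torch.nn.functional as F
feat = torch.randn(2, 8, 16, 16)      # (N, C, H, W)
pts = torch.rand(2, 5, 2)             # (N, P, 2) in [0, 1]
grid = pts.unsqueeze(2) * 2.0 - 1.0   # (N, P, 1, 2); assumed equivalent of denormalize
out = F.grid_sample(feat, grid, align_corners=False)  # (N, C, P, 1)
out = out.squeeze(3)                  # (N, C, P), matching the rewrite's output shape
assert out.shape == (2, 8, 5)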
188,640 | import torch.nn.functional as F
from mmdeploy.core import FUNCTION_REWRITER
from mmcv.ops.point_sample import denormalize
The provided code snippet includes necessary dependencies for implementing the `simple_roialign__forward` function. Write a Python function `def simple_roialign__forward(self, features, rois)` to solve the following problem:
Rewrite `forward` of SimpleRoIAlign. Args: features (torch.Tensor): Feature map, shape (N, C, H, W). rois (torch.Tensor): Returns: torch.Tensor: RoI features.
Here is the function:
def simple_roialign__forward(self, features, rois):
"""Rewrite `forward` of SimpleRoIAlign.
Args:
features (torch.Tensor): Feature map, shape (N, C, H, W).
rois (torch.Tensor):
Returns:
torch.Tensor: RoI features.
"""
from mmcv.ops.point_sample import (generate_grid, point_sample,
rel_roi_point_to_rel_img_point)
num_imgs = features.size(0)
num_rois = rois.size(0)
rel_roi_points = generate_grid(
num_rois, self.output_size, device=rois.device)
rel_img_points = rel_roi_point_to_rel_img_point(rois, rel_roi_points,
features,
self.spatial_scale)
rel_img_points = rel_img_points.reshape(num_imgs, -1,
*rel_img_points.shape[1:])
point_feats = point_sample(
features, rel_img_points, align_corners=not self.aligned)
point_feats = point_feats.transpose(1, 2)
channels = features.size(1)
roi_feats = point_feats.reshape(num_rois, channels, *self.output_size)
return roi_feats | Rewrite `forward` of SimpleRoIAlign. Args: features (torch.Tensor): Feature map, shape (N, C, H, W). rois (torch.Tensor): Returns: torch.Tensor: RoI features. |
188,641 | import torch
from mmdeploy.codebase.mmdet.deploy import clip_bboxes
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `distance2bbox__default` function. Write a Python function `def distance2bbox__default(points, distance, max_shape=None)` to solve the following problem:
Rewrite `mmdet.core.bbox.transforms.distance2bbox` Decode distance prediction to bounding box. Args: ctx (ContextCaller): The context with additional information. points (Tensor): Shape (B, N, 2) or (N, 2). distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4)
Here is the function:
def distance2bbox__default(points, distance, max_shape=None):
"""Rewrite `mmdet.core.bbox.transforms.distance2bbox`
Decode distance prediction to bounding box.
Args:
ctx (ContextCaller): The context with additional information.
points (Tensor): Shape (B, N, 2) or (N, 2).
distance (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
"""
x1 = points[..., 0] - distance[..., 0]
y1 = points[..., 1] - distance[..., 1]
x2 = points[..., 0] + distance[..., 2]
y2 = points[..., 1] + distance[..., 3]
bboxes = torch.stack([x1, y1, x2, y2], -1)
if max_shape is not None:
# clip bboxes with dynamic `min` and `max`
x1, y1, x2, y2 = clip_bboxes(x1, y1, x2, y2, max_shape)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return bboxes
return bboxes | Rewrite `mmdet.core.bbox.transforms.distance2bbox` Decode distance prediction to bounding box. Args: ctx (ContextCaller): The context with additional information. points (Tensor): Shape (B, N, 2) or (N, 2). distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4) |
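A small worked example of the decoding above (no clipping), with made-up numbers:
import torch
points = torch.tensor([[10., 10.], [20., 30.]])        # (N, 2) prior centers
distance = torch.tensor([[2., 3., 4., 5.],             # (N, 4): left, top, right, bottom
                         [1., 1., 1., 1.]])
x1 = points[..., 0] - distance[..., 0]
y1 = points[..., 1] - distance[..., 1]
x2 = points[..., 0] + distance[..., 2]
y2 = points[..., 1] + distance[..., 3]
bboxes = torch.stack([x1, y1, x2, y2], -1)
assert bboxes.tolist() == [[8., 7., 14., 15.], [19., 29., 21., 31.]]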
188,642 | import copy
import math
from functools import partial
from typing import Any, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from mmengine import Config
from mmengine.model.base_model.data_preprocessor import BaseDataPreprocessor
from mmengine.registry import Registry
from mmengine.structures import BaseDataElement, InstanceData
from torch import Tensor, nn
from mmdeploy.backend.base import get_backend_file_count
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
get_ir_config, get_partition_config,
get_quantization_config, load_config)
__BACKEND_MODEL = Registry('backend_detectors')
The provided code snippet includes necessary dependencies for implementing the `build_object_detection_model` function. Write a Python function `def build_object_detection_model( model_files: Sequence[str], model_cfg: Union[str, Config], deploy_cfg: Union[str, Config], device: str, data_preprocessor: Optional[Union[Config, BaseDataPreprocessor]] = None, **kwargs)` to solve the following problem:
Build object detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: End2EndModel: Detector for a configured backend.
Here is the function:
def build_object_detection_model(
model_files: Sequence[str],
model_cfg: Union[str, Config],
deploy_cfg: Union[str, Config],
device: str,
data_preprocessor: Optional[Union[Config,
BaseDataPreprocessor]] = None,
**kwargs):
"""Build object detection model for different backends.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | Config): Input model config file or Config
object.
deploy_cfg (str | Config): Input deployment config file or
Config object.
device (str): Device to input model
data_preprocessor (BaseDataPreprocessor | Config): The data
preprocessor of the model.
Returns:
End2EndModel: Detector for a configured backend.
"""
# load cfg if necessary
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
backend = get_backend(deploy_cfg)
partition_config = get_partition_config(deploy_cfg)
if partition_config is not None:
partition_type = partition_config.get('type', None)
else:
codebase_config = get_codebase_config(deploy_cfg)
# Default Config is 'end2end'
partition_type = codebase_config.get('model_type', 'end2end')
backend_detector = __BACKEND_MODEL.build(
dict(
type=partition_type,
backend=backend,
backend_files=model_files,
device=device,
model_cfg=model_cfg,
deploy_cfg=deploy_cfg,
data_preprocessor=data_preprocessor,
**kwargs))
return backend_detector | Build object detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: End2EndModel: Detector for a configured backend. |
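A sketch of how the builder is typically invoked; the paths and device below are placeholders, and the call only works once a model has actually been converted with the matching deploy config:
# Hypothetical paths, for illustration only.
detector = build_object_detection_model(
    model_files=['work_dir/end2end.onnx'],
    model_cfg='configs/model_cfg.py',
    deploy_cfg='configs/deploy_cfg.py',
    device='cpu')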
188,643 | from typing import Any, Optional, Sequence, Tuple, Union
import mmengine
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.utils import Backend, load_config
The provided code snippet includes necessary dependencies for implementing the `get_post_processing_params` function. Write a Python function `def get_post_processing_params(deploy_cfg: Union[str, mmengine.Config])` to solve the following problem:
Get mmdet post-processing parameters from config. Args: deploy_cfg (str | mmengine.Config): The path or content of config. Returns: dict: A dict of parameters for mmdet.
Here is the function:
def get_post_processing_params(deploy_cfg: Union[str, mmengine.Config]):
"""Get mmdet post-processing parameters from config.
Args:
deploy_cfg (str | mmengine.Config): The path or content of config.
Returns:
dict: A dict of parameters for mmdet.
"""
deploy_cfg = load_config(deploy_cfg)[0]
codebase_key = 'codebase_config'
assert codebase_key in deploy_cfg
codebase_config = deploy_cfg[codebase_key]
post_params = codebase_config.get('post_processing', None)
assert post_params is not None, 'Failed to get `post_processing`.'
return post_params | Get mmdet post-processing parameters from config. Args: deploy_cfg (str | mmengine.Config): The path or content of config. Returns: dict: A dict of parameters for mmdet. |
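A minimal sketch of a deploy config that satisfies the assertions above; the keys inside `post_processing` are illustrative, and it is assumed (as the docstring states) that `load_config` also accepts an in-memory `mmengine.Config`:
from mmengine import Config
deploy_cfg = Config(
    dict(
        codebase_config=dict(
            type='mmdet',
            task='ObjectDetection',
            post_processing=dict(score_threshold=0.05, iou_threshold=0.5))))
params = get_post_processing_params(deploy_cfg)
assert params['score_threshold'] == 0.05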
188,644 | from typing import Any, Optional, Sequence, Tuple, Union
import mmengine
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.utils import Backend, load_config
The provided code snippet includes necessary dependencies for implementing the `clip_bboxes` function. Write a Python function `def clip_bboxes(x1: Tensor, y1: Tensor, x2: Tensor, y2: Tensor, max_shape: Union[Tensor, Sequence[int]])` to solve the following problem:
Clip bboxes for onnx. Since torch.clamp cannot have dynamic `min` and `max`, we scale the boxes by 1/max_shape and clamp in the range [0, 1] if necessary. Args: x1 (Tensor): The x1 for bounding boxes. y1 (Tensor): The y1 for bounding boxes. x2 (Tensor): The x2 for bounding boxes. y2 (Tensor): The y2 for bounding boxes. max_shape (Tensor | Sequence[int]): The (H,W) of original image. Returns: tuple(Tensor): The clipped x1, y1, x2, y2.
Here is the function:
def clip_bboxes(x1: Tensor, y1: Tensor, x2: Tensor, y2: Tensor,
max_shape: Union[Tensor, Sequence[int]]):
"""Clip bboxes for onnx.
Since torch.clamp cannot have dynamic `min` and `max`, we scale the
boxes by 1/max_shape and clamp in the range [0, 1] if necessary.
Args:
x1 (Tensor): The x1 for bounding boxes.
y1 (Tensor): The y1 for bounding boxes.
x2 (Tensor): The x2 for bounding boxes.
y2 (Tensor): The y2 for bounding boxes.
max_shape (Tensor | Sequence[int]): The (H,W) of original image.
Returns:
tuple(Tensor): The clipped x1, y1, x2, y2.
"""
assert len(max_shape) == 2, '`max_shape` should be [h, w]'
if isinstance(max_shape, torch.Tensor):
# scale by 1/max_shape
x1 = x1 / max_shape[1]
y1 = y1 / max_shape[0]
x2 = x2 / max_shape[1]
y2 = y2 / max_shape[0]
# clamp [0, 1]
x1 = torch.clamp(x1, 0, 1)
y1 = torch.clamp(y1, 0, 1)
x2 = torch.clamp(x2, 0, 1)
y2 = torch.clamp(y2, 0, 1)
# scale back
x1 = x1 * max_shape[1]
y1 = y1 * max_shape[0]
x2 = x2 * max_shape[1]
y2 = y2 * max_shape[0]
else:
x1 = torch.clamp(x1, 0, max_shape[1])
y1 = torch.clamp(y1, 0, max_shape[0])
x2 = torch.clamp(x2, 0, max_shape[1])
y2 = torch.clamp(y2, 0, max_shape[0])
return x1, y1, x2, y2 | Clip bboxes for onnx. Since torch.clamp cannot have dynamic `min` and `max`, we scale the boxes by 1/max_shape and clamp in the range [0, 1] if necessary. Args: x1 (Tensor): The x1 for bounding boxes. y1 (Tensor): The y1 for bounding boxes. x2 (Tensor): The x2 for bounding boxes. y2 (Tensor): The y2 for bounding boxes. max_shape (Tensor | Sequence[int]): The (H,W) of original image. Returns: tuple(Tensor): The clipped x1, y1, x2, y2. |
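A minimal usage sketch of the tensor branch above (assuming `clip_bboxes` from the row is in scope; `max_shape` holds image height and width):
import torch
x1 = torch.tensor([-5., 10.])
y1 = torch.tensor([0., 20.])
x2 = torch.tensor([50., 700.])
y2 = torch.tensor([30., 500.])
max_shape = torch.tensor([480, 640])   # (H, W)
cx1, cy1, cx2, cy2 = clip_bboxes(x1, y1, x2, y2, max_shape)
# x coordinates end up in [0, 640], y coordinates in [0, 480]
assert cx1[0].item() == 0 and cx2[1].item() == 640 and cy2[1].item() == 480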
188,645 | from typing import Any, Optional, Sequence, Tuple, Union
import mmengine
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.utils import Backend, load_config
The provided code snippet includes necessary dependencies for implementing the `clip_bboxes__trt8` function. Write a Python function `def clip_bboxes__trt8(x1: Tensor, y1: Tensor, x2: Tensor, y2: Tensor, max_shape: Union[Tensor, Sequence[int]])` to solve the following problem:
Clip bboxes for onnx. From TensorRT 8 we can do the operators on the tensors directly. Args: ctx (ContextCaller): The context with additional information. x1 (Tensor): The x1 for bounding boxes. y1 (Tensor): The y1 for bounding boxes. x2 (Tensor): The x2 for bounding boxes. y2 (Tensor): The y2 for bounding boxes. max_shape (Tensor | Sequence[int]): The (H,W) of original image. Returns: tuple(Tensor): The clipped x1, y1, x2, y2.
Here is the function:
def clip_bboxes__trt8(x1: Tensor, y1: Tensor, x2: Tensor, y2: Tensor,
max_shape: Union[Tensor, Sequence[int]]):
"""Clip bboxes for onnx. From TensorRT 8 we can do the operators on the
tensors directly.
Args:
ctx (ContextCaller): The context with additional information.
x1 (Tensor): The x1 for bounding boxes.
y1 (Tensor): The y1 for bounding boxes.
x2 (Tensor): The x2 for bounding boxes.
y2 (Tensor): The y2 for bounding boxes.
max_shape (Tensor | Sequence[int]): The (H,W) of original image.
Returns:
tuple(Tensor): The clipped x1, y1, x2, y2.
"""
assert len(max_shape) == 2, '`max_shape` should be [h, w]'
x1 = torch.clamp(x1, 0, max_shape[1])
y1 = torch.clamp(y1, 0, max_shape[0])
x2 = torch.clamp(x2, 0, max_shape[1])
y2 = torch.clamp(y2, 0, max_shape[0])
return x1, y1, x2, y2 | Clip bboxes for onnx. From TensorRT 8 we can do the operators on the tensors directly. Args: ctx (ContextCaller): The context with additional information. x1 (Tensor): The x1 for bounding boxes. y1 (Tensor): The y1 for bounding boxes. x2 (Tensor): The x2 for bounding boxes. y2 (Tensor): The y2 for bounding boxes. max_shape (Tensor | Sequence[int]): The (H,W) of original image. Returns: tuple(Tensor): The clipped x1, y1, x2, y2. |
188,646 | from typing import Any, Optional, Sequence, Tuple, Union
import mmengine
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.utils import Backend, load_config
def __pad_with_value_if_necessary(x: Tensor,
pad_dim: int,
pad_size: int,
pad_value: Optional[Any] = None) -> Tensor:
"""Pad a tensor with a value along some dim, do nothing on default.
Args:
x (Tensor): Input tensor.
pad_dim (int): Along which dim to pad.
pad_size (int): To which size to pad.
pad_value (Any): Filled value for padding. Defaults to `None`.
Returns:
Tensor: Padded tensor.
"""
return x
'mmdeploy.codebase.mmdet.deploy.utils.__pad_with_value_if_necessary',
backend=Backend.TENSORRT.value)
The provided code snippet includes necessary dependencies for implementing the `pad_with_value_if_necessary` function. Write a Python function `def pad_with_value_if_necessary(x: Tensor, pad_dim: int, pad_size: int, pad_value: Optional[Any] = None) -> Tensor` to solve the following problem:
Pad a tensor with a value along some dim if necessary. Args: x (Tensor): Input tensor. pad_dim (int): Along which dim to pad. pad_size (int): To which size to pad. pad_value (Any): Filled value for padding. Defaults to `None`. Returns: Tensor: Padded tensor.
Here is the function:
def pad_with_value_if_necessary(x: Tensor,
pad_dim: int,
pad_size: int,
pad_value: Optional[Any] = None) -> Tensor:
"""Pad a tensor with a value along some dim if necessary.
Args:
x (Tensor): Input tensor.
pad_dim (int): Along which dim to pad.
pad_size (int): To which size to pad.
pad_value (Any): Filled value for padding. Defaults to `None`.
Returns:
Tensor: Padded tensor.
"""
return __pad_with_value_if_necessary(
x, pad_dim, pad_size=pad_size, pad_value=pad_value) | Pad a tensor with a value along some dim if necessary. Args: x (Tensor): Input tensor. pad_dim (int): Along which dim to pad. pad_size (int): To which size to pad. pad_value (Any): Filled value for padding. Defaults to `None`. Returns: Tensor: Padded tensor. |
188,647 | from typing import Any, Optional, Sequence, Tuple, Union
import mmengine
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.utils import Backend, load_config
def pad_with_value(x: Tensor,
pad_dim: int,
pad_size: int,
pad_value: Optional[Any] = None):
"""Pad a tensor with a value along some dim.
Args:
x (Tensor): Input tensor.
pad_dim (int): Along which dim to pad.
pad_size (int): To which size to pad.
pad_value (Any): Filled value for padding. Defaults to `None`.
Returns:
Tensor: Padded tensor.
"""
x_shape = list(x.shape)
pad_shape = x_shape[:pad_dim] + [pad_size] + x_shape[pad_dim + 1:]
x_pad = x.new_zeros(pad_shape)
if pad_value is not None:
x_pad = x_pad + pad_value
x = torch.cat([x, x_pad], dim=pad_dim)
return x
The provided code snippet includes necessary dependencies for implementing the `__pad_with_value_if_necessary__tensorrt` function. Write a Python function `def __pad_with_value_if_necessary__tensorrt( x: Tensor, pad_dim: int, pad_size: int, pad_value: Optional[Any] = None) -> Tensor` to solve the following problem:
Pad a tensor with a value along some dim. Args: x (Tensor): Input tensor. pad_dim (int): Along which dim to pad. pad_size (int): To which size to pad. pad_value (Any): Filled value for padding. Defaults to `None`. Returns: Tensor: Padded tensor.
Here is the function:
def __pad_with_value_if_necessary__tensorrt(
x: Tensor,
pad_dim: int,
pad_size: int,
pad_value: Optional[Any] = None) -> Tensor:
"""Pad a tensor with a value along some dim.
Args:
x (Tensor): Input tensor.
pad_dim (int): Along which dim to pad.
pad_size (int): To which size to pad.
pad_value (Any): Filled value for padding. Defaults to `None`.
Returns:
Tensor: Padded tensor.
"""
return pad_with_value(x, pad_dim, pad_size=pad_size, pad_value=pad_value) | Pad a tensor with a value along some dim. Args: x (Tensor): Input tensor. pad_dim (int): Along which dim to pad. pad_size (int): To which size to pad. pad_value (Any): Filled value for padding. Defaults to `None`. Returns: Tensor: Padded tensor. |
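A small shape sketch of `pad_with_value` as written above: it concatenates a block of `pad_size` extra entries (filled with `pad_value`) along `pad_dim` (assuming the function from the row is in scope):
import torch
x = torch.ones(2, 3, 4)
y = pad_with_value(x, pad_dim=1, pad_size=2, pad_value=0.)
assert y.shape == (2, 5, 4)   # the padded dim grows by pad_size entries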
188,648 | from typing import Any, Optional, Sequence, Tuple, Union
import mmengine
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.utils import Backend, load_config
class TRTGatherTopk(torch.autograd.Function):
def forward(ctx, x: torch.Tensor, inds: torch.Tensor):
"""Implement of gather topk."""
batch_size = x.size(0)
batch_inds = torch.arange(batch_size, device=inds.device).unsqueeze(-1)
return x[batch_inds, inds, ...]
def symbolic(g, x, inds):
"""symbolic of gather topk."""
out = g.op('mmdeploy::GatherTopk', x, inds, outputs=1)
return out
'mmdeploy.codebase.mmdet.deploy.utils.__gather_topk',
backend=Backend.TENSORRT.value)
The provided code snippet includes necessary dependencies for implementing the `__gather_topk__trt` function. Write a Python function `def __gather_topk__trt(*inputs: Sequence[torch.Tensor], inds: torch.Tensor, batch_size: int, is_batched: bool = True) -> Tuple[torch.Tensor]` to solve the following problem:
TensorRT gather_topk.
Here is the function:
def __gather_topk__trt(*inputs: Sequence[torch.Tensor],
inds: torch.Tensor,
batch_size: int,
is_batched: bool = True) -> Tuple[torch.Tensor]:
"""TensorRT gather_topk."""
ctx = FUNCTION_REWRITER.get_context()
_ = ctx
if is_batched:
index_shape = inds.shape
index_dim = inds.dim()
outputs = [None for _ in inputs]
for i, x in enumerate(inputs):
if x is None:
continue
out = TRTGatherTopk.apply(x, inds).to(x.dtype)
out_shape = [*index_shape, *x.shape[index_dim:]]
out = out.reshape(out_shape)
outputs[i] = out
else:
prior_inds = inds.new_zeros((1, 1))
outputs = [
x[prior_inds, inds, ...] if x is not None else None for x in inputs
]
return outputs | TensorRT gather_topk. |
188,649 | from typing import Any, Optional, Sequence, Tuple, Union
import mmengine
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.utils import Backend, load_config
The provided code snippet includes necessary dependencies for implementing the `__gather_topk__nonbatch` function. Write a Python function `def __gather_topk__nonbatch(*inputs: Sequence[torch.Tensor], inds: torch.Tensor, batch_size: int, is_batched: bool = True) -> Tuple[torch.Tensor]` to solve the following problem:
Single batch gather_topk.
Here is the function:
def __gather_topk__nonbatch(*inputs: Sequence[torch.Tensor],
inds: torch.Tensor,
batch_size: int,
is_batched: bool = True) -> Tuple[torch.Tensor]:
"""Single batch gather_topk."""
assert batch_size == 1
inds = inds.squeeze(0)
outputs = [x[:, inds, ...] if x is not None else None for x in inputs]
return outputs | Single batch gather_topk. |
188,650 | from typing import Any, Optional, Sequence, Tuple, Union
import mmengine
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.utils import Backend, load_config
def __gather_topk(*inputs: Sequence[torch.Tensor],
inds: torch.Tensor,
batch_size: int,
is_batched: bool = True) -> Tuple[torch.Tensor]:
"""The default implementation of gather_topk."""
if is_batched:
batch_inds = torch.arange(batch_size, device=inds.device).unsqueeze(-1)
outputs = [
x[batch_inds, inds, ...] if x is not None else None for x in inputs
]
else:
prior_inds = inds.new_zeros((1, 1))
outputs = [
x[prior_inds, inds, ...] if x is not None else None for x in inputs
]
return outputs
The provided code snippet includes necessary dependencies for implementing the `gather_topk` function. Write a Python function `def gather_topk(*inputs: Sequence[torch.Tensor], inds: torch.Tensor, batch_size: int, is_batched: bool = True) -> Tuple[torch.Tensor]` to solve the following problem:
Gather topk of each tensor. Args: inputs (Sequence[torch.Tensor]): Tensors to be gathered. inds (torch.Tensor): Topk index. batch_size (int): batch_size. is_batched (bool): Inputs is batched or not. Returns: Tuple[torch.Tensor]: Gathered tensors.
Here is the function:
def gather_topk(*inputs: Sequence[torch.Tensor],
inds: torch.Tensor,
batch_size: int,
is_batched: bool = True) -> Tuple[torch.Tensor]:
"""Gather topk of each tensor.
Args:
inputs (Sequence[torch.Tensor]): Tensors to be gathered.
inds (torch.Tensor): Topk index.
batch_size (int): batch_size.
is_batched (bool): Inputs is batched or not.
Returns:
Tuple[torch.Tensor]: Gathered tensors.
"""
import mmdeploy
outputs = mmdeploy.codebase.mmdet.deploy.utils.__gather_topk(
*inputs, inds=inds, batch_size=batch_size, is_batched=is_batched)
if len(outputs) == 1:
outputs = outputs[0]
return outputs | Gather topk of each tensor. Args: inputs (Sequence[torch.Tensor]): Tensors to be gathered. inds (torch.Tensor): Topk index. batch_size (int): batch_size. is_batched (bool): Inputs is batched or not. Returns: Tuple[torch.Tensor]: Gathered tensors. |
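A minimal sketch of the default batched path (`__gather_topk` above): per-batch top-k indices applied with advanced indexing:
import torch
scores = torch.rand(2, 10)                    # (batch, num_priors)
boxes = torch.rand(2, 10, 4)                  # (batch, num_priors, 4)
_, inds = scores.topk(3, dim=1)               # (2, 3)
batch_inds = torch.arange(2, device=inds.device).unsqueeze(-1)  # (2, 1)
top_boxes = boxes[batch_inds, inds, ...]      # (2, 3, 4), same gather as the batched branch
assert top_boxes.shape == (2, 3, 4)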
188,651 | from copy import deepcopy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import Backend, Codebase, Task
from mmdeploy.utils.config_utils import (get_backend, get_input_shape,
is_dynamic_shape)
The provided code snippet includes necessary dependencies for implementing the `process_model_config` function. Write a Python function `def process_model_config(model_cfg: Config, imgs: Union[Sequence[str], Sequence[np.ndarray]], input_shape: Optional[Sequence[int]] = None)` to solve the following problem:
Process the model config. Args: model_cfg (Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data type are List[str], List[np.ndarray]. input_shape (list[int]): A list of two integer in (width, height) format specifying input shape. Default: None. Returns: Config: the model config after processing.
Here is the function:
def process_model_config(model_cfg: Config,
imgs: Union[Sequence[str], Sequence[np.ndarray]],
input_shape: Optional[Sequence[int]] = None):
"""Process the model config.
Args:
model_cfg (Config): The model config.
imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted
data type are List[str], List[np.ndarray].
input_shape (list[int]): A list of two integer in (width, height)
format specifying input shape. Default: None.
Returns:
Config: the model config after processing.
"""
cfg = model_cfg.copy()
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.test_pipeline[0].type = 'mmdet.LoadImageFromNDArray'
pipeline = cfg.test_pipeline
for i, transform in enumerate(pipeline):
# for static exporting
if input_shape is not None:
if transform.type == 'Resize':
pipeline[i].keep_ratio = False
pipeline[i].scale = tuple(input_shape)
elif transform.type in ('YOLOv5KeepRatioResize', 'LetterResize'):
pipeline[i].scale = tuple(input_shape)
elif transform.type == 'Pad' and 'size' in transform:
pipeline[i].size = tuple(input_shape)
pipeline = [
transform for transform in pipeline
if transform.type != 'LoadAnnotations'
]
cfg.test_pipeline = pipeline
return cfg | Process the model config. Args: model_cfg (Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data type are List[str], List[np.ndarray]. input_shape (list[int]): A list of two integer in (width, height) format specifying input shape. Default: None. Returns: Config: the model config after processing. |
188,652 | from copy import deepcopy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import Backend, Codebase, Task
from mmdeploy.utils.config_utils import (get_backend, get_input_shape,
is_dynamic_shape)
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_metainfo` function. Write a Python function `def _get_dataset_metainfo(model_cfg: Config)` to solve the following problem:
Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: list[str]: A list of string specifying names of different class.
Here is the function:
def _get_dataset_metainfo(model_cfg: Config):
"""Get metainfo of dataset.
Args:
model_cfg Config: Input model Config object.
Returns:
list[str]: A list of string specifying names of different class.
"""
from mmdet import datasets # noqa
from mmdet.registry import DATASETS
module_dict = DATASETS.module_dict
for dataloader_name in [
'test_dataloader', 'val_dataloader', 'train_dataloader'
]:
if dataloader_name not in model_cfg:
continue
dataloader_cfg = model_cfg[dataloader_name]
dataset_cfg = dataloader_cfg.dataset
dataset_cls = module_dict.get(dataset_cfg.type, None)
if dataset_cls is None:
continue
if hasattr(dataset_cls, '_load_metainfo') and isinstance(
dataset_cls._load_metainfo, Callable):
meta = dataset_cls._load_metainfo(
dataset_cfg.get('metainfo', None))
if meta is not None:
return meta
if hasattr(dataset_cls, 'METAINFO'):
return dataset_cls.METAINFO
return None | Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: list[str]: A list of string specifying names of different class. |
188,653 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_common_config
The provided code snippet includes necessary dependencies for implementing the `focus__forward__default` function. Write a Python function `def focus__forward__default(self, x)` to solve the following problem:
Rewrite forward function of Focus class. Replace slice with transpose.
Here is the function:
def focus__forward__default(self, x):
"""Rewrite forward function of Focus class.
Replace slice with transpose.
"""
# shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)
B, C, H, W = x.shape
x = x.reshape(B, C, -1, 2, W)
x = x.reshape(B, C, x.shape[2], 2, -1, 2)
half_H = x.shape[2]
half_W = x.shape[4]
x = x.permute(0, 5, 3, 1, 2, 4)
x = x.reshape(B, C * 4, half_H, half_W)
return self.conv(x) | Rewrite forward function of Focus class. Replace slice with transpose. |
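A quick equivalence check of the reshape/permute path above against the slice-based Focus it replaces; the slice/concat order assumed here (top-left, bottom-left, top-right, bottom-right) matches mmdet's YOLOX Focus:
import torch
x = torch.randn(1, 3, 8, 8)
B, C, H, W = x.shape
# slice-based reference (assumed original Focus ordering)
ref = torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2],
                 x[..., ::2, 1::2], x[..., 1::2, 1::2]), dim=1)
# reshape/permute path used by the rewrite above (without the final conv)
y = x.reshape(B, C, -1, 2, W)
y = y.reshape(B, C, y.shape[2], 2, -1, 2)
y = y.permute(0, 5, 3, 1, 2, 4).reshape(B, C * 4, H // 2, W // 2)
assert torch.equal(ref, y)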
188,654 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_common_config
The provided code snippet includes necessary dependencies for implementing the `focus__forward__ncnn` function. Write a Python function `def focus__forward__ncnn(self, x)` to solve the following problem:
Rewrite forward function of Focus class for ncnn. Focus width and height information into channel space. ncnn does not support slice operator which step greater than 1, so we use another way to implement. Args: x (Tensor): The input tensor with shape (N, C, H, W). Returns: x (Tensor): The calculated tensor with shape (N, 4*C, H//2, W//2).
Here is the function:
def focus__forward__ncnn(self, x):
"""Rewrite forward function of Focus class for ncnn.
Focus width and height information into channel space. ncnn does not
support slice operator which step greater than 1, so we use another
way to implement.
Args:
x (Tensor): The input tensor with shape (N, C, H, W).
Returns:
x (Tensor): The calculated tensor with shape (N, 4*C, H//2, W//2).
"""
batch_size, c, h, w = x.shape
assert h % 2 == 0 and w % 2 == 0, f'focus for yolox needs even feature\
height and width, got {(h, w)}.'
x = x.reshape(batch_size, c * h, 1, w)
_b, _c, _h, _w = x.shape
g = torch.div(_c, 2, rounding_mode='floor')
# fuse to ncnn's shufflechannel
x = x.view(_b, g, 2, _h, _w)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(_b, -1, _h, _w)
x = x.reshape(_b, c * h * w, 1, 1)
_b, _c, _h, _w = x.shape
g = torch.div(_c, 2, rounding_mode='floor')
# fuse to ncnn's shufflechannel
x = x.view(_b, g, 2, _h, _w)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(_b, -1, _h, _w)
x = x.reshape(_b, c * 4, torch.div(h, 2, rounding_mode='floor'),
torch.div(w, 2, rounding_mode='floor'))
return self.conv(x) | Rewrite forward function of Focus class for ncnn. Focus width and height information into channel space. ncnn does not support slice operator which step greater than 1, so we use another way to implement. Args: x (Tensor): The input tensor with shape (N, C, H, W). Returns: x (Tensor): The calculated tensor with shape (N, 4*C, H//2, W//2). |
188,655 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_common_config
The provided code snippet includes necessary dependencies for implementing the `windowmsa__forward__tensorrt` function. Write a Python function `def windowmsa__forward__tensorrt(self, x, mask=None)` to solve the following problem:
Rewrite forward function of WindowMSA class for TensorRT. 1. replace Gather operation of qkv with split. 2. replace SoftMax operation with a workaround done by PyTorch. Args: x (tensor): input features with shape of (num_windows*B, N, C) mask (tensor | None, Optional): mask with shape of (num_windows, Wh*Ww, Wh*Ww), value should be between (-inf, 0].
Here is the function:
def windowmsa__forward__tensorrt(self, x, mask=None):
"""Rewrite forward function of WindowMSA class for TensorRT.
1. replace Gather operation of qkv with split.
2. replace SoftMax operation with a workaround done by PyTorch.
Args:
x (tensor): input features with shape of (num_windows*B, N, C)
mask (tensor | None, Optional): mask with shape of (num_windows,
Wh*Ww, Wh*Ww), value should be between (-inf, 0].
"""
ctx = FUNCTION_REWRITER.get_context()
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads,
-1).permute(2, 0, 3, 1, 4).contiguous()
# replace the gather operation with the split
q, k, v = [i.squeeze(0) for i in torch.split(qkv, 1, 0)]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1],
self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(-1, nW, self.num_heads, N,
N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
# replace softmax with a workaround
# weird bug from TensorRT. softmax cannot be used here for fp32 and it
# can be used in fp16, but softmax fp16 performance is not as good as
# exp and log_softmax. Besides, only the UT of exp and log_softmax passed.
fp16_mode = get_common_config(ctx.cfg).get('fp16_mode', False)
if fp16_mode:
attn = torch.exp(torch.log_softmax(attn, dim=self.softmax.dim))
else:
means = torch.mean(attn, self.softmax.dim, keepdim=True)[0]
attn_exp = torch.exp(attn - means)
attn_exp_sum = torch.sum(attn_exp, self.softmax.dim, keepdim=True)
attn = attn_exp / attn_exp_sum
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).contiguous().reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x | Rewrite forward function of WindowMSA class for TensorRT. 1. replace Gather operation of qkv with split. 2. replace SoftMax operation with a workaround done by PyTorch. Args: x (tensor): input features with shape of (num_windows*B, N, C) mask (tensor | None, Optional): mask with shape of (num_windows, Wh*Ww, Wh*Ww), value should be between (-inf, 0]. |
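The fp32 workaround above is softmax written as an explicit exp-normalize; since softmax is invariant to subtracting a per-row constant, the mean shift does not change the result. A quick numerical check of that identity:
import torch
attn = torch.randn(2, 4, 9, 9)
means = attn.mean(-1, keepdim=True)
manual = torch.exp(attn - means)
manual = manual / manual.sum(-1, keepdim=True)
assert torch.allclose(manual, torch.softmax(attn, dim=-1), atol=1e-6)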
188,656 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_common_config
The provided code snippet includes necessary dependencies for implementing the `shift_window_msa__window_reverse__tensorrt` function. Write a Python function `def shift_window_msa__window_reverse__tensorrt(self, windows, H, W)` to solve the following problem:
Rewrite window_reverse function of ShiftWindowMSA class for TensorRT. For TensorRT, seems radical shape transformations are not allowed. Replace them with soft ones. Args: windows: (num_windows*B, window_size, window_size, C) H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
Here is the function:
def shift_window_msa__window_reverse__tensorrt(self, windows, H, W):
"""Rewrite window_reverse function of ShiftWindowMSA class for TensorRT.
For TensorRT, seems radical shape transformations are not allowed. Replace
them with soft ones.
Args:
windows: (num_windows*B, window_size, window_size, C)
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
window_size = self.window_size
B = int(windows.shape[0] / (H * W / window_size / window_size))
# x = windows.view(B, H // window_size, W // window_size, window_size,
# window_size, -1)
x = windows.view(B, -1, W, window_size, windows.shape[-1])
x = x.view(B, x.shape[1], -1, window_size, window_size, x.shape[-1])
x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, H, W, x.shape[-1])
return x | Rewrite window_reverse function of ShiftWindowMSA class for TensorRT. For TensorRT, seems radical shape transformations are not allowed. Replace them with soft ones. Args: windows: (num_windows*B, window_size, window_size, C) H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) |
188,657 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_common_config
The provided code snippet includes necessary dependencies for implementing the `shift_window_msa__window_partition__tensorrt` function. Write a Python function `def shift_window_msa__window_partition__tensorrt(self, x)` to solve the following problem:
Rewrite window_partition function of ShiftWindowMSA class for TensorRT. For TensorRT, seems radical shape transformations are not allowed. Replace them with soft ones. Args: x: (B, H, W, C) Returns: windows: (num_windows*B, window_size, window_size, C)
Here is the function:
def shift_window_msa__window_partition__tensorrt(self, x):
"""Rewrite window_partition function of ShiftWindowMSA class for TensorRT.
For TensorRT, seems radical shape transformations are not allowed. Replace
them with soft ones.
Args:
x: (B, H, W, C)
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
window_size = self.window_size
x = x.view(B, H, -1, window_size, C)
x = x.view(B, -1, window_size, x.shape[-3], window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()
windows = windows.view(-1, window_size, window_size, C)
return windows | Rewrite window_partition function of ShiftWindowMSA class for TensorRT. For TensorRT, seems radical shape transformations are not allowed. Replace them with soft ones. Args: x: (B, H, W, C) Returns: windows: (num_windows*B, window_size, window_size, C) |
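A quick check that the two-step view above matches the usual single 6-D view of Swin's window partition (window_size assumed to divide H and W):
import torch
B, H, W, C, ws = 2, 8, 8, 3, 4
x = torch.randn(B, H, W, C)
# usual single-view formulation
ref = x.view(B, H // ws, ws, W // ws, ws, C)
ref = ref.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, ws, ws, C)
# two-step view used in the rewrite above
y = x.view(B, H, -1, ws, C)
y = y.view(B, -1, ws, y.shape[-3], ws, C)
y = y.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, ws, ws, C)
assert torch.equal(ref, y)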
188,658 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_common_config
The provided code snippet includes necessary dependencies for implementing the `shift_window_msa__forward__default` function. Write a Python function `def shift_window_msa__forward__default(self, query, hw_shape)` to solve the following problem:
Rewrite forward function of ShiftWindowMSA class. 1. replace dynamic padding with static padding and dynamic slice. 2. always do slice `x = x[:, :H, :W, :].contiguous()` for stability.
Here is the function:
def shift_window_msa__forward__default(self, query, hw_shape):
"""Rewrite forward function of ShiftWindowMSA class.
1. replace dynamic padding with static padding and dynamic slice.
2. always do slice `x = x[:, :H, :W, :].contiguous()` for stability.
"""
B, L, C = query.shape
H, W = hw_shape
assert L == H * W, 'input feature has wrong size'
query = query.view(B, H, W, C)
# pad feature maps to multiples of window size
query = query.permute(0, 3, 1, 2).contiguous()
# query = torch.nn.ZeroPad2d([0, self.window_size, 0, self.window_size])(
# query)
query = torch.cat(
[query, query.new_zeros(B, C, H, self.window_size)], dim=-1)
query = torch.cat(
[query,
query.new_zeros(B, C, self.window_size, query.shape[-1])],
dim=-2)
slice_h = torch.div(
(H + self.window_size - 1), self.window_size,
rounding_mode='floor') * self.window_size
slice_w = torch.div(
(W + self.window_size - 1), self.window_size,
rounding_mode='floor') * self.window_size
query = query[:, :, :slice_h, :slice_w]
query = query.permute(0, 2, 3, 1).contiguous()
H_pad, W_pad = query.shape[1], query.shape[2]
# cyclic shift
if self.shift_size > 0:
shifted_query = torch.roll(
query, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
# calculate attention mask for SW-MSA
w_mask = torch.cat([
shifted_query.new_zeros(W_pad - self.window_size),
shifted_query.new_full((self.window_size - self.shift_size, ), 1),
shifted_query.new_full((self.shift_size, ), 2)
])
h_mask = torch.cat([
shifted_query.new_zeros(H_pad - self.window_size),
shifted_query.new_full((self.window_size - self.shift_size, ), 3),
shifted_query.new_full((self.shift_size, ), 6)
])
img_mask = w_mask.unsqueeze(0) + h_mask.unsqueeze(1)
img_mask = img_mask.unsqueeze(0)
img_mask = img_mask.unsqueeze(-1)
# nW, window_size, window_size, 1
mask_windows = self.window_partition(img_mask)
mask_windows = mask_windows.view(-1,
self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0,
float(-100.0)).masked_fill(
attn_mask == 0, float(0.0))
else:
shifted_query = query
attn_mask = None
# nW*B, window_size, window_size, C
query_windows = self.window_partition(shifted_query)
# nW*B, window_size*window_size, C
query_windows = query_windows.view(-1, self.window_size**2, C)
# W-MSA/SW-MSA (nW*B, window_size*window_size, C)
attn_windows = self.w_msa(query_windows, mask=attn_mask)
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
# B H' W' C
shifted_x = self.window_reverse(attn_windows, H_pad, W_pad)
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(
shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
x = self.drop(x)
return x | Rewrite forward function of ShiftWindowMSA class. 1. replace dynamic padding with static padding and dynamic slice. 2. always do slice `x = x[:, :H, :W, :].contiguous()` for stability. |
188,659 | import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `patch_merging__forward__tensorrt` function. Write a Python function `def patch_merging__forward__tensorrt(self, x, input_size)` to solve the following problem:
Rewrite forward function of PatchMerging class for TensorRT. In original implementation, mmdet applies nn.unfold to accelerate the inference. However, the onnx graph of it can not be parsed correctly by TensorRT. In mmdeploy, it is replaced. Args: x (Tensor): Has shape (B, H*W, C_in). input_size (tuple[int]): The spatial shape of x, arrange as (H, W). Default: None. Returns: tuple: Contains merged results and its spatial shape. - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) - out_size (tuple[int]): Spatial shape of x, arrange as (Merged_H, Merged_W).
Here is the function:
def patch_merging__forward__tensorrt(self, x, input_size):
"""Rewrite forward function of PatchMerging class for TensorRT. In original
implementation, mmdet applies nn.unfold to accelerate the inference.
However, the onnx graph of it can not be parsed correctly by TensorRT. In
mmdeploy, it is replaced.
Args:
x (Tensor): Has shape (B, H*W, C_in).
input_size (tuple[int]): The spatial shape of x, arrange as (H, W).
Default: None.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, Merged_H * Merged_W, C_out)
- out_size (tuple[int]): Spatial shape of x, arrange as
(Merged_H, Merged_W).
"""
H, W = input_size
B, L, C = x.shape
assert L == H * W, 'input feature has wrong size'
assert H % 2 == 0 and W % 2 == 0, f'x size ({H}*{W}) are not even.'
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x2 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = x.view(x.shape[0], x.shape[1], 4,
-1).permute(0, 1, 3, 2).reshape(x.shape[0], x.shape[1], -1)
x = self.norm(x) if self.norm else x
x = self.reduction(x)
out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] *
(self.sampler.kernel_size[0] - 1) -
1) // self.sampler.stride[0] + 1
out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] *
(self.sampler.kernel_size[1] - 1) -
1) // self.sampler.stride[1] + 1
output_size = (out_h, out_w)
return x, output_size | Rewrite forward function of PatchMerging class for TensorRT. In original implementation, mmdet applies nn.unfold to accelerate the inference. However, the onnx graph of it can not be parsed correctly by TensorRT. In mmdeploy, it is replaced. Args: x (Tensor): Has shape (B, H*W, C_in). input_size (tuple[int]): The spatial shape of x, arrange as (H, W). Default: None. Returns: tuple: Contains merged results and its spatial shape. - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) - out_size (tuple[int]): Spatial shape of x, arrange as (Merged_H, Merged_W). |
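The `out_h`/`out_w` lines above are the standard convolution output-size formula applied to the (removed) unfold sampler; a small arithmetic sketch with the usual PatchMerging settings (kernel = stride = 2, padding = 0, dilation = 1, which are assumptions here):
# out = (size + 2*padding - dilation*(kernel - 1) - 1) // stride + 1
H, W = 14, 14
kernel, stride, padding, dilation = 2, 2, 0, 1
out_h = (H + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1
out_w = (W + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1
assert (out_h, out_w) == (7, 7)   # each spatial dim is halved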
188,660 | import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `mask_matrix_nms__default` function. Write a Python function `def mask_matrix_nms__default(masks, labels, scores, filter_thr=-1, nms_pre=-1, max_num=-1, kernel='gaussian', sigma=2.0, mask_area=None)` to solve the following problem:
Matrix NMS for multi-class masks. Args: masks (Tensor): Has shape (num_instances, h, w) labels (Tensor): Labels of corresponding masks, has shape (num_instances,). scores (Tensor): Mask scores of corresponding masks, has shape (num_instances). filter_thr (float): Score threshold to filter the masks after matrix nms. Default: -1, which means do not use filter_thr. nms_pre (int): The max number of instances to do the matrix nms. Default: -1, which means do not use nms_pre. max_num (int, optional): If there are more than max_num masks after matrix, only top max_num will be kept. Default: -1, which means do not use max_num. kernel (str): 'linear' or 'gaussian'. sigma (float): std in gaussian method. mask_area (Tensor): The sum of seg_masks. Returns: tuple(Tensor): Processed mask results. - scores (Tensor): Updated scores, has shape (n,). - labels (Tensor): Remained labels, has shape (n,). - masks (Tensor): Remained masks, has shape (n, w, h). - keep_inds (Tensor): The indices number of the remaining mask in the input mask, has shape (n,).
Here is the function:
def mask_matrix_nms__default(masks,
labels,
scores,
filter_thr=-1,
nms_pre=-1,
max_num=-1,
kernel='gaussian',
sigma=2.0,
mask_area=None):
"""Matrix NMS for multi-class masks.
Args:
masks (Tensor): Has shape (num_instances, h, w)
labels (Tensor): Labels of corresponding masks,
has shape (num_instances,).
scores (Tensor): Mask scores of corresponding masks,
has shape (num_instances).
filter_thr (float): Score threshold to filter the masks
after matrix nms. Default: -1, which means do not
use filter_thr.
nms_pre (int): The max number of instances to do the matrix nms.
Default: -1, which means do not use nms_pre.
max_num (int, optional): If there are more than max_num masks after
matrix, only top max_num will be kept. Default: -1, which means
do not use max_num.
kernel (str): 'linear' or 'gaussian'.
sigma (float): std in gaussian method.
mask_area (Tensor): The sum of seg_masks.
Returns:
tuple(Tensor): Processed mask results.
- scores (Tensor): Updated scores, has shape (n,).
- labels (Tensor): Remained labels, has shape (n,).
- masks (Tensor): Remained masks, has shape (n, w, h).
- keep_inds (Tensor): The indices number of
the remaining mask in the input mask, has shape (n,).
"""
assert len(labels) == len(masks) == len(scores)
assert len(masks) == len(mask_area)
# sort and keep top nms_pre
nms_pre = max(0, nms_pre)
if nms_pre <= 0:
nms_pre = scores.shape[0]
scores, sort_inds = torch.topk(scores, nms_pre)
keep_inds = sort_inds
masks = masks[sort_inds]
mask_area = mask_area[sort_inds]
labels = labels[sort_inds]
num_masks = labels.size(0)
flatten_masks = masks.reshape(num_masks, -1).float()
# inter.
inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0))
expanded_mask_area = mask_area.unsqueeze(1)
total_area = expanded_mask_area + expanded_mask_area.transpose(
1, 0) - inter_matrix
total_mask = total_area > 0
total_area = total_area.where(total_mask, total_area.new_ones(1))
iou_matrix = (inter_matrix / total_area).triu(diagonal=1)
expanded_labels = labels.unsqueeze(1)
label_matrix = expanded_labels == expanded_labels.transpose(1, 0)
# iou decay
decay_iou = iou_matrix.where(label_matrix, iou_matrix.new_zeros(1))
# iou compensation
compensate_iou, _ = decay_iou.max(0)
compensate_iou = compensate_iou.expand(num_masks,
num_masks).transpose(1, 0)
# calculate the decay_coefficient
if kernel == 'gaussian':
decay_matrix = torch.exp(-1 * sigma * (decay_iou**2))
compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2))
decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)
elif kernel == 'linear':
decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
decay_coefficient, _ = decay_matrix.min(0)
else:
raise NotImplementedError(
f'{kernel} kernel is not supported in matrix nms!')
# update the score.
scores = scores * decay_coefficient
keep = scores >= filter_thr
scores = scores.where(keep, scores.new_zeros(1))
# sort and keep top max_num
scores, sort_inds = torch.topk(scores, max(max_num, 0))
keep_inds = keep_inds[sort_inds]
masks = masks[sort_inds]
labels = labels[sort_inds]
return scores, labels, masks, keep_inds | Matrix NMS for multi-class masks. Args: masks (Tensor): Has shape (num_instances, h, w) labels (Tensor): Labels of corresponding masks, has shape (num_instances,). scores (Tensor): Mask scores of corresponding masks, has shape (num_instances). filter_thr (float): Score threshold to filter the masks after matrix nms. Default: -1, which means do not use filter_thr. nms_pre (int): The max number of instances to do the matrix nms. Default: -1, which means do not use nms_pre. max_num (int, optional): If there are more than max_num masks after matrix, only top max_num will be kept. Default: -1, which means do not use max_num. kernel (str): 'linear' or 'gaussian'. sigma (float): std in gaussian method. mask_area (Tensor): The sum of seg_masks. Returns: tuple(Tensor): Processed mask results. - scores (Tensor): Updated scores, has shape (n,). - labels (Tensor): Remained labels, has shape (n,). - masks (Tensor): Remained masks, has shape (n, w, h). - keep_inds (Tensor): The indices number of the remaining mask in the input mask, has shape (n,). |
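A minimal sketch of the pairwise mask-IoU computation that feeds the decay above; `clamp` stands in for the `.where(...)` guard used in the rewrite, and only the upper triangle (lower-scored vs. higher-scored pairs) is kept:
import torch
masks = (torch.rand(3, 4, 4) > 0.5).float()      # (num_instances, h, w), already score-sorted
flat = masks.reshape(3, -1)
area = flat.sum(-1)
inter = flat @ flat.t()                           # pairwise intersections
union = (area[:, None] + area[None, :] - inter).clamp(min=1)
iou = (inter / union).triu(diagonal=1)            # keep only pairs where j > i
assert torch.all(iou.tril() == 0)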
188,661 | from typing import List, Optional
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops import multiclass_nms
The provided code snippet includes necessary dependencies for implementing the `fovea_head__predict_by_feat` function. Write a Python function `def fovea_head__predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceData` to solve the following problem:
Rewrite `predict_by_feat` of `FoveaHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. self (FoveaHead): The instance of the class FoveaHead. cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). score_factors (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Default None. batch_img_metas (list[dict]): Meta information of the image, e.g., image size, scaling factor, etc. cfg (mmengine.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. rescale (bool): If True, return boxes in original image space. Default: False. Returns: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det].
Here is the function:
def fovea_head__predict_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True) -> InstanceData:
"""Rewrite `predict_by_feat` of `FoveaHead` for default backend.
Rewrite this function to deploy model, transform network output for a
batch into bbox predictions.
Args:
ctx (ContextCaller): The context with additional information.
self (FoveaHead): The instance of the class FoveaHead.
cls_scores (list[Tensor]): Box scores for each scale level
with shape (N, num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
score_factors (list[Tensor], Optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Default None.
batch_img_metas (list[dict]): Meta information of the image, e.g.,
image size, scaling factor, etc.
cfg (mmengine.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used. Default: None.
rescale (bool): If True, return boxes in original image space.
Default: False.
Returns:
tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels),
`dets` of shape [N, num_det, 5] and `labels` of shape
[N, num_det].
"""
ctx = FUNCTION_REWRITER.get_context()
assert len(cls_scores) == len(bbox_preds)
cfg = self.test_cfg if cfg is None else cfg
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device)
cls_score_list = [cls_scores[i].detach() for i in range(num_levels)]
bbox_pred_list = [bbox_preds[i].detach() for i in range(num_levels)]
img_shape = batch_img_metas[0]['img_shape']
batch_size = cls_scores[0].shape[0]
det_bboxes = []
det_scores = []
for cls_score, bbox_pred, base_len, point \
in zip(cls_score_list, bbox_pred_list,
self.base_edge_list, mlvl_priors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
x = point[:, 0]
y = point[:, 1]
scores = cls_score.permute(0, 2, 3,
1).reshape(batch_size, -1,
self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1,
4).exp()
x1 = (x - base_len * bbox_pred[:, :, 0]). \
clamp(min=0, max=img_shape[1] - 1)
y1 = (y - base_len * bbox_pred[:, :, 1]). \
clamp(min=0, max=img_shape[0] - 1)
x2 = (x + base_len * bbox_pred[:, :, 2]). \
clamp(min=0, max=img_shape[1] - 1)
y2 = (y + base_len * bbox_pred[:, :, 3]). \
clamp(min=0, max=img_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], -1)
det_bboxes.append(bboxes)
det_scores.append(scores)
det_bboxes = torch.cat(det_bboxes, dim=1)
if rescale:
        scale_factor = batch_img_metas[0]['scale_factor']
det_bboxes /= det_bboxes.new_tensor(scale_factor)
det_scores = torch.cat(det_scores, dim=1)
deploy_cfg = ctx.cfg
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
nms_pre = cfg.get('deploy_nms_pre', -1)
nms_type = cfg.nms.get('type')
return multiclass_nms(
det_bboxes,
det_scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=nms_pre,
keep_top_k=cfg.max_per_img) | Rewrite `predict_by_feat` of `FoveaHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. self (FoveaHead): The instance of the class FoveaHead. cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). score_factors (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Default None. batch_img_metas (list[dict]): Meta information of the image, e.g., image size, scaling factor, etc. cfg (mmengine.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. rescale (bool): If True, return boxes in original image space. Default: False. Returns: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. |
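For orientation, rewrites such as the one above are attached to the original mmdet method through `FUNCTION_REWRITER.register_rewriter`. A hedged sketch of that registration pattern follows; the exact `func_name` string is an assumption made for illustration and should be checked against the mmdeploy source.

from mmdeploy.core import FUNCTION_REWRITER

# Assumed registration target; verify the dotted path in the mmdeploy codebase.
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdet.models.dense_heads.fovea_head.FoveaHead.predict_by_feat')
def fovea_head__predict_by_feat(self, cls_scores, bbox_preds, **kwargs):
    ctx = FUNCTION_REWRITER.get_context()  # deploy config is available as ctx.cfg
    ...  # deploy-friendly decoding, as implemented above

During export, mmdeploy patches the registered target so calls to `FoveaHead.predict_by_feat` are routed to the rewrite.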
188,662 | from typing import List
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `centernet_head__predict_by_feat__default` function. Write a Python function `def centernet_head__predict_by_feat__default( self, center_heatmap_preds: List[Tensor], wh_preds: List[Tensor], offset_preds: List[Tensor], batch_img_metas: List[dict], rescale: bool = True, with_nms: bool = False)` to solve the following problem:
Rewrite `predict_by_feat` of `CenterNetHead` for default backend.
Here is the function:
def centernet_head__predict_by_feat__default(
self,
center_heatmap_preds: List[Tensor],
wh_preds: List[Tensor],
offset_preds: List[Tensor],
batch_img_metas: List[dict],
rescale: bool = True,
with_nms: bool = False):
"""Rewrite `centernethead` of `CenterNetHead` for default backend."""
    # The dynamic-shape deployment of CenterNet gets wrong results on
    # TensorRT 8.4.x because of a TensorRT bug, see
    # https://github.com/NVIDIA/TensorRT/issues/2299.
assert len(center_heatmap_preds) == len(wh_preds) == len(offset_preds) == 1
batch_center_heatmap_preds = center_heatmap_preds[0]
batch_wh_preds = wh_preds[0]
batch_offset_preds = offset_preds[0]
batch_size = batch_center_heatmap_preds.shape[0]
img_shape = batch_img_metas[0]['img_shape']
batch_det_bboxes, batch_labels = self._decode_heatmap(
batch_center_heatmap_preds,
batch_wh_preds,
batch_offset_preds,
img_shape,
k=self.test_cfg.topk,
kernel=self.test_cfg.local_maximum_kernel)
det_bboxes = batch_det_bboxes.reshape([batch_size, -1, 5])
det_labels = batch_labels.reshape(batch_size, -1)
if with_nms:
det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels,
self.test_cfg)
return det_bboxes, det_labels | Rewrite `centernethead` of `CenterNetHead` for default backend. |
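The decoding above delegates to `self._decode_heatmap`, whose core idea is to keep only local maxima of the center heatmap (`local_maximum_kernel`) and take the top-k of them. A self-contained toy sketch of that local-maximum step; shapes and values are made up and this is not the mmdet implementation.

import torch
import torch.nn.functional as F

heat = torch.rand(1, 80, 128, 128)      # (batch, num_classes, H, W)
kernel = 3
pad = (kernel - 1) // 2
hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
heat = heat * (hmax == heat).float()    # keep only local peaks
topk_scores, topk_inds = heat.view(1, -1).topk(100)
topk_xs = topk_inds % 128               # x inside the 128-wide feature map
topk_ys = (topk_inds // 128) % 128      # y inside the 128-high feature map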
188,663 | from typing import Dict, List, Optional
import torch
from mmdet.models.utils import aligned_bilinear
from mmengine.config import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops.nms import multiclass_nms
def multiclass_nms(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1,
output_index: bool = False,
nms_type='nms'):
"""Apis for multiclass nms."""
if nms_type == 'nms':
return _multiclass_nms(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k,
output_index=output_index)
elif nms_type == 'nms_rotated':
return multiclass_nms_rotated(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k)
elif nms_type == 'nms_match':
return multiclass_nms_match(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k)
else:
raise NotImplementedError(f'Unsupported nms type: {nms_type}.')
def condinst_bbox_head__predict_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
score_factors: Optional[List[Tensor]] = None,
param_preds: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True,
):
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
assert len(cls_scores) == len(bbox_preds)
device = bbox_preds[0].device
cfg = self.test_cfg if cfg is None else cfg
batch_size = bbox_preds[0].shape[0]
featmap_sizes = [cls_score.shape[-2:] for cls_score in cls_scores]
all_level_points_strides = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
all_level_points = [i[:, :2] for i in all_level_points_strides]
all_level_strides = [i[:, 2] for i in all_level_points_strides]
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
for bbox_pred in bbox_preds
]
flatten_score_factors = [
score_factor.permute(0, 2, 3, 1).reshape(batch_size, -1, 1)
for score_factor in score_factors
]
flatten_param_preds = [
param_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, self.num_params)
for param_pred in param_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
flatten_score_factors = torch.cat(flatten_score_factors, dim=1).sigmoid()
flatten_param_preds = torch.cat(flatten_param_preds, dim=1)
points = torch.cat(all_level_points)
strides = torch.cat(all_level_strides)
tl_x = points[..., 0] - flatten_bbox_preds[..., 0]
tl_y = points[..., 1] - flatten_bbox_preds[..., 1]
br_x = points[..., 0] + flatten_bbox_preds[..., 2]
br_y = points[..., 1] + flatten_bbox_preds[..., 3]
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)
scores = flatten_cls_scores
score_factors = flatten_score_factors
param_preds = flatten_param_preds
scores = scores * score_factors
# get post processing config
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
dets, labels, inds = multiclass_nms(
bboxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k,
output_index=True,
)
batch_inds = torch.arange(batch_size, device=bboxes.device).view(-1, 1)
points = points.unsqueeze(0).repeat(batch_size, 1, 1)
strides = strides.unsqueeze(0).repeat(batch_size, 1)
param_preds = param_preds[batch_inds, inds, :]
points = points[batch_inds, inds, :]
strides = strides[batch_inds, inds]
results = dict(
dets=dets,
labels=labels,
param_preds=param_preds,
points=points,
strides=strides)
return results | null |
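The `param_preds[batch_inds, inds, :]` indexing above gathers, for each image in the batch, only the rows kept by NMS. A standalone toy example of that advanced-indexing pattern; all shapes are made up.

import torch

batch_size, num_priors, num_params, num_kept = 2, 5, 3, 4
param_preds = torch.arange(
    batch_size * num_priors * num_params,
    dtype=torch.float32).view(batch_size, num_priors, num_params)
inds = torch.randint(0, num_priors, (batch_size, num_kept))   # per-image kept indices
batch_inds = torch.arange(batch_size).view(-1, 1)             # (batch_size, 1)
kept = param_preds[batch_inds, inds, :]                       # (batch_size, num_kept, num_params)
assert kept.shape == (batch_size, num_kept, num_params)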
188,664 | from typing import Dict, List, Optional
import torch
from mmdet.models.utils import aligned_bilinear
from mmengine.config import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops.nms import multiclass_nms
def _parse_dynamic_params(self, params: Tensor):
"""parse the dynamic params for dynamic conv."""
batch_size = params.shape[0]
num_insts = params.shape[1]
params = params.permute(1, 0, 2)
params_splits = list(
torch.split_with_sizes(
params, self.weight_nums + self.bias_nums, dim=2))
weight_splits = params_splits[:self.num_layers]
bias_splits = params_splits[self.num_layers:]
for idx in range(self.num_layers):
if idx < self.num_layers - 1:
weight_splits[idx] = weight_splits[idx].reshape(
batch_size, num_insts, self.in_channels, -1)
else:
weight_splits[idx] = weight_splits[idx].reshape(
batch_size, num_insts, 1, -1)
return weight_splits, bias_splits
def _dynamic_conv_forward(features: Tensor, weights: List[Tensor],
biases: List[Tensor]):
"""dynamic forward, each layer follow a relu."""
n_layers = len(weights)
x = features.flatten(0, 1).flatten(2)
for i, (w, b) in enumerate(zip(weights, biases)):
# replace dynamic conv with bmm
w = w.flatten(0, 1)
b = b.flatten(0, 1).unsqueeze(2)
x = torch.bmm(w, x)
x = x + b
if i < n_layers - 1:
x = x.clamp_(min=0)
return x
def condinst_mask_head__forward(self, x: tuple,
positive_infos: Dict[str, torch.Tensor]):
mask_feats = self.mask_feature_head(x)
param_preds = positive_infos['param_preds']
points = positive_infos['points']
strides = positive_infos['strides']
batch_size = points.shape[0]
num_insts = points.shape[1]
hw = mask_feats.size()[-2:]
mask_feats = mask_feats.unsqueeze(1).repeat(1, num_insts, 1, 1, 1)
points = points.reshape(-1, 1, 2).unsqueeze(0)
locations = self.prior_generator.single_level_grid_priors(
hw, level_idx=0, device=mask_feats.device)
locations = locations.unsqueeze(0).repeat(batch_size, 1,
1).reshape(batch_size, 1, -1, 2)
centers = points.reshape(batch_size, -1, 1, 2)
rel_coordinates = (centers - locations).permute(0, 1, 3, 2).float()
rel_coordinates /= (strides[:, :, None, None] * self.size_of_interest)
rel_coords = rel_coordinates.reshape(batch_size, -1, 2, hw[0], hw[1])
mask_head_inputs = torch.cat([rel_coords, mask_feats], dim=2)
weights, biases = _parse_dynamic_params(self, param_preds)
mask_preds = _dynamic_conv_forward(mask_head_inputs, weights, biases)
mask_preds = mask_preds.reshape(batch_size, num_insts, hw[0], hw[1])
mask_preds = aligned_bilinear(
mask_preds, int(self.mask_feat_stride / self.mask_out_stride))
return (mask_preds, ) | null |
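`_dynamic_conv_forward` above replaces per-instance 1x1 dynamic convolutions with `torch.bmm` so the exported graph avoids instance-dependent conv weights. A quick sanity sketch, with toy shapes, showing that a 1x1 convolution and the matrix-multiply formulation agree.

import torch
import torch.nn.functional as F

c_in, c_out, h, w = 8, 4, 6, 6
feat = torch.randn(1, c_in, h, w)
weight = torch.randn(c_out, c_in, 1, 1)
bias = torch.randn(c_out)

ref = F.conv2d(feat, weight, bias)                              # (1, c_out, h, w)
alt = weight.view(c_out, c_in) @ feat.view(c_in, h * w) + bias.view(-1, 1)
assert torch.allclose(ref.view(c_out, h * w), alt, atol=1e-5)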
188,665 | from typing import Dict, List, Optional
import torch
from mmdet.models.utils import aligned_bilinear
from mmengine.config import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops.nms import multiclass_nms
def condinst_mask_head__predict_by_feat(self,
mask_preds: Tensor,
results_list: Dict[str, torch.Tensor],
batch_img_metas: List[dict],
rescale: bool = True,
**kwargs):
cfg = self.test_cfg
dets = results_list['dets']
labels = results_list['labels']
img_hw = batch_img_metas[0]['img_shape'][:2]
mask_preds = mask_preds.sigmoid()
mask_preds = aligned_bilinear(mask_preds, self.mask_out_stride)
mask_preds = mask_preds[:, :, :img_hw[0], :img_hw[1]]
masks = (mask_preds > cfg.mask_thr).float()
return dets, labels, masks | null |
188,666 | from typing import List
import torch
from torch import Tensor
from torch.nn import functional as F
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `detrhead__predict_by_feat__default` function. Write a Python function `def detrhead__predict_by_feat__default(self, all_cls_scores_list: List[Tensor], all_bbox_preds_list: List[Tensor], batch_img_metas: List[dict], rescale: bool = True)` to solve the following problem:
Rewrite `predict_by_feat` of `DETRHead` for default backend.
Here is the function:
def detrhead__predict_by_feat__default(self,
all_cls_scores_list: List[Tensor],
all_bbox_preds_list: List[Tensor],
batch_img_metas: List[dict],
rescale: bool = True):
"""Rewrite `predict_by_feat` of `FoveaHead` for default backend."""
from mmdet.structures.bbox import bbox_cxcywh_to_xyxy
cls_scores = all_cls_scores_list[-1]
bbox_preds = all_bbox_preds_list[-1]
img_shape = batch_img_metas[0]['img_shape']
if isinstance(img_shape, list):
img_shape = torch.tensor(
img_shape, dtype=torch.long, device=cls_scores.device)
img_shape = img_shape.unsqueeze(0)
max_per_img = self.test_cfg.get('max_per_img', len(cls_scores[0]))
batch_size = cls_scores.size(0)
# `batch_index_offset` is used for the gather of concatenated tensor
# supports dynamical batch inference
if self.loss_cls.use_sigmoid:
batch_index_offset = torch.arange(batch_size).to(
cls_scores.device) * max_per_img
batch_index_offset = batch_index_offset.unsqueeze(1).expand(
batch_size, max_per_img)
cls_scores = cls_scores.sigmoid()
scores, indexes = cls_scores.flatten(1).topk(max_per_img, dim=1)
det_labels = indexes % self.num_classes
bbox_index = indexes // self.num_classes
bbox_index = (bbox_index + batch_index_offset).view(-1)
bbox_preds = bbox_preds.view(-1, 4)[bbox_index]
bbox_preds = bbox_preds.view(batch_size, -1, 4)
else:
scores, det_labels = F.softmax(cls_scores, dim=-1)[..., :-1].max(-1)
scores, bbox_index = scores.topk(max_per_img, dim=1)
batch_inds = torch.arange(
batch_size, device=scores.device).unsqueeze(-1)
bbox_preds = bbox_preds[batch_inds, bbox_index, ...]
# add unsqueeze to support tensorrt
det_labels = det_labels.unsqueeze(-1)[batch_inds, bbox_index,
...].squeeze(-1)
det_bboxes = bbox_cxcywh_to_xyxy(bbox_preds)
det_bboxes.clamp_(min=0., max=1.)
shape_scale = img_shape.flip(1).repeat(1, 2).unsqueeze(1)
det_bboxes = det_bboxes * shape_scale
det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(-1)), -1)
return det_bboxes, det_labels | Rewrite `predict_by_feat` of `FoveaHead` for default backend. |
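The final scaling above converts normalized (cx, cy, w, h) boxes to corner format and multiplies them by (w, h, w, h) of the input image. A standalone sketch of that step; the manual formula stands in for mmdet's `bbox_cxcywh_to_xyxy` so the snippet runs without mmdet, and all values are toy.

import torch

bbox_preds = torch.tensor([[[0.5, 0.5, 0.2, 0.4]]])    # (batch, num_query, 4), normalized
img_shape = torch.tensor([[480., 640.]])                # (batch, 2) as (h, w)

cx, cy, w, h = bbox_preds.unbind(-1)
xyxy = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
xyxy = xyxy.clamp(min=0., max=1.)
shape_scale = img_shape.flip(1).repeat(1, 2).unsqueeze(1)   # (batch, 1, 4) = (w, h, w, h)
boxes = xyxy * shape_scale                                  # pixel-space xyxy boxes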
188,667 | from typing import List, Optional, Sequence
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
get_post_processing_params,
pad_with_value_if_necessary)
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `reppoints_head__points2bbox` function. Write a Python function `def reppoints_head__points2bbox(self, pts, y_first=True)` to solve the following problem:
Rewrite of `points2bbox` in `RepPointsHead`. Using `self.moment_transfer` in `points2bbox` will cause an error: RuntimeError: Input, output and indices must be on the current device
Here is the function:
def reppoints_head__points2bbox(self, pts, y_first=True):
"""Rewrite of `points2bbox` in `RepPointsHead`.
    Using `self.moment_transfer` in `points2bbox` will cause an error:
RuntimeError: Input, output and indices must be on the current device
"""
ctx = FUNCTION_REWRITER.get_context()
update_moment = hasattr(self, 'moment_transfer')
if update_moment:
moment_transfer = self.moment_transfer
delattr(self, 'moment_transfer')
self.moment_transfer = torch.tensor(moment_transfer.data)
ret = ctx.origin_func(self, pts, y_first=y_first)
if update_moment:
self.moment_transfer = moment_transfer
return ret | Rewrite of `points2bbox` in `RepPointsHead`. Use `self.moment_transfer` in `points2bbox` will cause error: RuntimeError: Input, output and indices must be on the current device |
188,668 | from typing import List, Optional, Sequence
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
get_post_processing_params,
pad_with_value_if_necessary)
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import is_dynamic_shape
def _bbox_pre_decode(points: torch.Tensor, bbox_pred: torch.Tensor,
stride: torch.Tensor):
"""compute real bboxes."""
points = points[..., :2]
bbox_pos_center = torch.cat([points, points], dim=-1)
bboxes = bbox_pred * stride + bbox_pos_center
return bboxes
def _bbox_post_decode(bboxes: torch.Tensor, max_shape: Sequence[int]):
"""clamp bbox."""
x1 = bboxes[..., 0].clamp(min=0, max=max_shape[1])
y1 = bboxes[..., 1].clamp(min=0, max=max_shape[0])
x2 = bboxes[..., 2].clamp(min=0, max=max_shape[1])
y2 = bboxes[..., 3].clamp(min=0, max=max_shape[0])
decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
return decoded_bboxes
The provided code snippet includes necessary dependencies for implementing the `reppoints_head__predict_by_feat` function. Write a Python function `def reppoints_head__predict_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceData` to solve the following problem:
Rewrite `predict_by_feat` of `RepPointsHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. self (RepPointsHead): The instance of the class RepPointsHead. cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). score_factors (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Default None. img_metas (list[dict]): Meta information of the image, e.g., image size, scaling factor, etc. cfg (mmengine.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. rescale (bool): If True, return boxes in original image space. Default: False. Returns: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det].
Here is the function:
def reppoints_head__predict_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True) -> InstanceData:
"""Rewrite `predict_by_feat` of `RepPointsHead` for default backend.
Rewrite this function to deploy model, transform network output for a
batch into bbox predictions.
Args:
ctx (ContextCaller): The context with additional information.
self (RepPointsHead): The instance of the class RepPointsHead.
cls_scores (list[Tensor]): Box scores for each scale level
with shape (N, num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
score_factors (list[Tensor], Optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Default None.
img_metas (list[dict]): Meta information of the image, e.g.,
image size, scaling factor, etc.
cfg (mmengine.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used. Default: None.
rescale (bool): If True, return boxes in original image space.
Default: False.
Returns:
tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels),
`dets` of shape [N, num_det, 5] and `labels` of shape
[N, num_det].
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device)
mlvl_priors = [priors.unsqueeze(0) for priors in mlvl_priors]
mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
assert batch_img_metas is not None
img_shape = batch_img_metas[0]['img_shape']
assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors)
batch_size = cls_scores[0].shape[0]
cfg = self.test_cfg
pre_topk = cfg.get('nms_pre', -1)
mlvl_valid_bboxes = []
mlvl_valid_scores = []
for level_idx, (cls_score, bbox_pred, priors) in enumerate(
zip(mlvl_cls_scores, mlvl_bbox_preds, mlvl_priors)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
if self.use_sigmoid_cls:
scores = scores.sigmoid()
else:
scores = scores.softmax(-1)
bbox_pred = bbox_pred.permute(0, 2, 3, 1)
bbox_pred = bbox_pred.reshape(batch_size, -1)
bbox_pred = (bbox_pred + 0).reshape(batch_size, -1, 4)
if not is_dynamic_flag:
priors = priors.data
if pre_topk > 0:
priors = pad_with_value_if_necessary(priors, 1, pre_topk)
bbox_pred = pad_with_value_if_necessary(bbox_pred, 1, pre_topk)
scores = pad_with_value_if_necessary(scores, 1, pre_topk, 0.)
nms_pre_score = scores
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores, _ = nms_pre_score.max(-1)
else:
max_scores, _ = nms_pre_score[..., :-1].max(-1)
_, topk_inds = max_scores.topk(pre_topk)
bbox_pred, scores = gather_topk(
bbox_pred,
scores,
inds=topk_inds,
batch_size=batch_size,
is_batched=True)
priors = gather_topk(
priors,
inds=topk_inds,
batch_size=batch_size,
is_batched=False)
bbox_pred = _bbox_pre_decode(priors, bbox_pred,
self.point_strides[level_idx])
mlvl_valid_bboxes.append(bbox_pred)
mlvl_valid_scores.append(scores)
batch_mlvl_bboxes_pred = torch.cat(mlvl_valid_bboxes, dim=1)
batch_scores = torch.cat(mlvl_valid_scores, dim=1)
batch_bboxes = _bbox_post_decode(
bboxes=batch_mlvl_bboxes_pred, max_shape=img_shape)
if cfg.get('min_bbox_size', -1) >= 0:
w = batch_bboxes[:, :, 2] - batch_bboxes[:, :, 0]
h = batch_bboxes[:, :, 3] - batch_bboxes[:, :, 1]
valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)
if not valid_mask.all():
batch_scores = batch_scores * valid_mask.unsqueeze(-1)
if not self.use_sigmoid_cls:
batch_scores = batch_scores[..., :self.num_classes]
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
nms_type = cfg.nms.get('type')
return multiclass_nms(
batch_bboxes,
batch_scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k) | Rewrite `predict_by_feat` of `RepPointsHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. self (RepPointsHead): The instance of the class RepPointsHead. cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). score_factors (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Default None. img_metas (list[dict]): Meta information of the image, e.g., image size, scaling factor, etc. cfg (mmengine.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. rescale (bool): If True, return boxes in original image space. Default: False. Returns: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. |
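`_bbox_pre_decode` and `_bbox_post_decode` above turn per-point offsets into clamped corner boxes. A tiny worked example with one prior point, one stride and a made-up image size of 640x480.

import torch

point = torch.tensor([[32., 32.]])                  # prior center (x, y)
bbox_pred = torch.tensor([[-1., -1., 2., 2.]])      # predicted (x1, y1, x2, y2) offsets
stride = 8
center = torch.cat([point, point], dim=-1)          # (x, y, x, y)
bboxes = bbox_pred * stride + center                # -> [24., 24., 48., 48.]
bboxes = bboxes.clamp(min=0)
bboxes[..., 0::2] = bboxes[..., 0::2].clamp(max=640 - 1)   # clamp x to image width
bboxes[..., 1::2] = bboxes[..., 1::2].clamp(max=480 - 1)   # clamp y to image height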
188,669 | from typing import Sequence
import numpy as np
import torch
from mmdet.utils import OptConfigType
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `yolov3_head__predict_by_feat` function. Write a Python function `def yolov3_head__predict_by_feat(self, pred_maps: Sequence[Tensor], cfg: OptConfigType = None, rescale: bool = False, with_nms: bool = True, **kwargs)` to solve the following problem:
Rewrite `predict_by_feat` of `YOLOV3Head` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. pred_maps (Sequence[Tensor]): Raw predictions for a batch of images. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores
Here is the function:
def yolov3_head__predict_by_feat(self,
pred_maps: Sequence[Tensor],
cfg: OptConfigType = None,
rescale: bool = False,
with_nms: bool = True,
**kwargs):
"""Rewrite `predict_by_feat` of `YOLOV3Head` for default backend.
Rewrite this function to deploy model, transform network output for a
batch into bbox predictions.
Args:
ctx (ContextCaller): The context with additional information.
pred_maps (Sequence[Tensor]): Raw predictions for a batch of
images.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
If with_nms == True:
tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels),
`dets` of shape [N, num_det, 5] and `labels` of shape
[N, num_det].
Else:
tuple[Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
# mark pred_maps
@mark('yolo_head', inputs=['pred_maps'])
def __mark_pred_maps(pred_maps):
return pred_maps
pred_maps = __mark_pred_maps(pred_maps)
is_dynamic_flag = is_dynamic_shape(ctx.cfg)
num_levels = len(pred_maps)
pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)]
cfg = self.test_cfg if cfg is None else cfg
assert len(pred_maps_list) == self.num_levels
device = pred_maps_list[0].device
batch_size = pred_maps_list[0].shape[0]
featmap_sizes = [
pred_maps_list[i].shape[-2:] for i in range(self.num_levels)
]
multi_lvl_anchors = self.prior_generator.grid_anchors(
featmap_sizes, device)
pre_topk = cfg.get('nms_pre', -1)
multi_lvl_bboxes = []
multi_lvl_cls_scores = []
multi_lvl_conf_scores = []
for i in range(self.num_levels):
# get some key info for current scale
pred_map = pred_maps_list[i]
stride = self.featmap_strides[i]
# (b,h, w, num_anchors*num_attrib) ->
# (b,h*w*num_anchors, num_attrib)
pred_map = pred_map.permute(0, 2, 3,
1).reshape(batch_size, -1, self.num_attrib)
# Inplace operation like
# ```pred_map[..., :2] = \torch.sigmoid(pred_map[..., :2])```
# would create constant tensor when exporting to onnx
pred_map_conf = torch.sigmoid(pred_map[..., :2])
pred_map_rest = pred_map[..., 2:]
pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1)
pred_map_boxes = pred_map[..., :4]
multi_lvl_anchor = multi_lvl_anchors[i]
# use static anchor if input shape is static
if not is_dynamic_flag:
multi_lvl_anchor = multi_lvl_anchor.data
multi_lvl_anchor = multi_lvl_anchor.unsqueeze(0)
bbox_pred = self.bbox_coder.decode(multi_lvl_anchor, pred_map_boxes,
stride)
# conf and cls
conf_pred = torch.sigmoid(pred_map[..., 4])
cls_pred = torch.sigmoid(pred_map[..., 5:]).view(
batch_size, -1, self.num_classes) # Cls pred one-hot.
# Save the result of current scale
multi_lvl_bboxes.append(bbox_pred)
multi_lvl_cls_scores.append(cls_pred)
multi_lvl_conf_scores.append(conf_pred)
# Merge the results of different scales together
batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1)
batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1)
batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1)
post_params = get_post_processing_params(deploy_cfg)
    if pre_topk > 0:
        _, topk_inds = batch_mlvl_conf_scores.topk(pre_topk)
        batch_inds = torch.arange(
            batch_size, device=device).unsqueeze(-1).long()
        # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501
        transformed_inds = (
            batch_mlvl_bboxes.shape[1] * batch_inds + topk_inds.long())
        batch_mlvl_bboxes = batch_mlvl_bboxes.reshape(
            -1, 4)[transformed_inds, :].reshape(batch_size, -1, 4)
        batch_mlvl_scores = batch_mlvl_scores.reshape(
            -1, self.num_classes)[transformed_inds, :].reshape(
                batch_size, -1, self.num_classes)
        batch_mlvl_conf_scores = batch_mlvl_conf_scores.reshape(
            -1, 1)[transformed_inds].reshape(batch_size, -1)
batch_mlvl_conf_scores = batch_mlvl_conf_scores.unsqueeze(2)
batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_conf_scores
if with_nms:
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
# keep aligned with original pipeline, improve
# mAP by 1% for YOLOv3 in ONNX
score_threshold = 0
nms_type = cfg.nms.get('type')
return multiclass_nms(
batch_mlvl_bboxes,
batch_mlvl_scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k)
else:
return batch_mlvl_bboxes, batch_mlvl_scores | Rewrite `predict_by_feat` of `YOLOV3Head` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. pred_maps (Sequence[Tensor]): Raw predictions for a batch of images. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores |
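The `transformed_inds` trick above flattens the batch and prior dimensions into a single index so the exported graph only needs a plain 1-D gather, sidestepping the TensorRT issue referenced in the comment. A standalone sketch of the pattern with toy shapes.

import torch

batch_size, num_priors, k = 2, 10, 3
bboxes = torch.randn(batch_size, num_priors, 4)
conf = torch.rand(batch_size, num_priors)

_, topk_inds = conf.topk(k)                                 # (batch_size, k)
batch_inds = torch.arange(batch_size).unsqueeze(-1)         # (batch_size, 1)
transformed_inds = num_priors * batch_inds + topk_inds      # flat row indices
kept = bboxes.reshape(-1, 4)[transformed_inds, :]           # (batch_size, k, 4)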
188,670 | from typing import Sequence
import numpy as np
import torch
from mmdet.utils import OptConfigType
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `yolov3_head__predict_by_feat__ncnn` function. Write a Python function `def yolov3_head__predict_by_feat__ncnn(self, pred_maps, with_nms=True, cfg=None, **kwargs)` to solve the following problem:
Rewrite `predict_by_feat` of YOLOV3Head for ncnn backend. 1. Shape node and batch inference is not supported by ncnn. This function transform dynamic shape to constant shape and remove batch inference. 2. Batch dimension is not supported by ncnn, but supported by pytorch. The negative value of axis in torch.cat is rewritten as corresponding positive value to avoid axis shift. 3. 2-dimension tensor broadcast of `BinaryOps` operator is not supported by ncnn. This function unsqueeze 2-dimension tensor to 3-dimension tensor for correct `BinaryOps` calculation by ncnn. Args: ctx (ContextCaller): The context with additional information. self: Represent the instance of the original class. pred_maps (list[Tensor]): Raw predictions for a batch of images. with_nms (bool): If True, do nms before return boxes. Default: True. cfg (mmengine.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. Returns: Tensor: Detection_output of shape [num_boxes, 6], each row is [label, score, x1, y1, x2, y2]. Note that fore-ground class label in Yolov3DetectionOutput starts from `1`. x1, y1, x2, y2 are normalized in range(0,1).
Here is the function:
def yolov3_head__predict_by_feat__ncnn(self,
pred_maps,
with_nms=True,
cfg=None,
**kwargs):
"""Rewrite `predict_by_feat` of YOLOV3Head for ncnn backend.
    1. Shape nodes and batch inference are not supported by ncnn. This function
    transforms dynamic shapes to constant shapes and removes batch inference.
    2. The batch dimension is not supported by ncnn, but is supported by pytorch.
    The negative axis value in torch.cat is rewritten as the corresponding
    positive value to avoid an axis shift.
    3. 2-dimension tensor broadcast of the `BinaryOps` operator is not supported
    by ncnn. This function unsqueezes 2-dimension tensors to 3-dimension tensors
    for correct `BinaryOps` calculation by ncnn.
Args:
ctx (ContextCaller): The context with additional information.
self: Represent the instance of the original class.
pred_maps (list[Tensor]): Raw predictions for a batch of images.
with_nms (bool): If True, do nms before return boxes.
Default: True.
cfg (mmengine.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used. Default: None.
Returns:
Tensor: Detection_output of shape [num_boxes, 6],
each row is [label, score, x1, y1, x2, y2]. Note that
fore-ground class label in Yolov3DetectionOutput starts
from `1`. x1, y1, x2, y2 are normalized in range(0,1).
"""
ctx = FUNCTION_REWRITER.get_context()
num_levels = len(pred_maps)
cfg = self.test_cfg if cfg is None else cfg
post_params = get_post_processing_params(ctx.cfg)
confidence_threshold = cfg.get('conf_thr',
post_params.confidence_threshold)
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
anchor_biases = np.array(
self.prior_generator.base_sizes).reshape(-1).tolist()
num_box = len(self.prior_generator.base_sizes[0])
bias_masks = list(range(num_levels * num_box))
def _create_yolov3_detection_output():
"""Help create Yolov3DetectionOutput op in ONNX."""
class Yolov3DetectionOutputOp(torch.autograd.Function):
"""Create Yolov3DetectionOutput op.
Args:
*inputs (Tensor): Multiple predicted feature maps.
num_class (int): Number of classes.
num_box (int): Number of box per grid.
confidence_threshold (float): Threshold of object
score.
nms_threshold (float): IoU threshold for NMS.
                biases (List[float]): Base sizes to compute anchors
                    for each FPN level.
mask (List[float]): Used to select base sizes in
biases.
anchors_scale (List[float]): Down-sampling scales of
each FPN layer, e.g.: [32, 16].
"""
@staticmethod
def forward(ctx, *args):
                # create dummy output of shape [num_boxes, 6],
# each row is [label, score, x1, y1, x2, y2]
output = torch.rand(100, 6)
return output
@staticmethod
def symbolic(g, *args):
anchors_scale = args[-1]
inputs = args[:len(anchors_scale)]
assert len(args) == (len(anchors_scale) + 7)
return g.op(
'mmdeploy::Yolov3DetectionOutput',
*inputs,
num_class_i=args[-7],
num_box_i=args[-6],
confidence_threshold_f=args[-5],
nms_threshold_f=args[-4],
biases_f=args[-3],
mask_f=args[-2],
anchors_scale_f=anchors_scale,
outputs=1)
return Yolov3DetectionOutputOp.apply(*pred_maps, self.num_classes,
num_box, confidence_threshold,
iou_threshold, anchor_biases,
bias_masks, self.featmap_strides)
output = _create_yolov3_detection_output()
return output | Rewrite `predict_by_feat` of YOLOV3Head for ncnn backend. 1. Shape node and batch inference is not supported by ncnn. This function transform dynamic shape to constant shape and remove batch inference. 2. Batch dimension is not supported by ncnn, but supported by pytorch. The negative value of axis in torch.cat is rewritten as corresponding positive value to avoid axis shift. 3. 2-dimension tensor broadcast of `BinaryOps` operator is not supported by ncnn. This function unsqueeze 2-dimension tensor to 3-dimension tensor for correct `BinaryOps` calculation by ncnn. Args: ctx (ContextCaller): The context with additional information. self: Represent the instance of the original class. pred_maps (list[Tensor]): Raw predictions for a batch of images. with_nms (bool): If True, do nms before return boxes. Default: True. cfg (mmengine.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. Returns: Tensor: Detection_output of shape [num_boxes, 6], each row is [label, score, x1, y1, x2, y2]. Note that fore-ground class label in Yolov3DetectionOutput starts from `1`. x1, y1, x2, y2 are normalized in range(0,1). |
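`Yolov3DetectionOutputOp` above follows the usual pattern for emitting a backend-specific ONNX node: `forward` returns a dummy tensor of the expected shape so tracing can proceed, while `symbolic` emits a custom-domain op that the target backend implements natively. A minimal hedged sketch of the same pattern; the op name and attribute below are made up for illustration only.

import torch

class DummyDetectionOutputOp(torch.autograd.Function):
    """Shape-only placeholder op exported as a custom ONNX node."""

    @staticmethod
    def forward(ctx, feats, score_threshold):
        # Dummy result so tracing sees a tensor of the right shape.
        return feats.new_zeros(100, 6)

    @staticmethod
    def symbolic(g, feats, score_threshold):
        # The '_f' suffix marks a float attribute; the domain/op name is hypothetical.
        return g.op(
            'example::DummyDetectionOutput',
            feats,
            score_threshold_f=score_threshold,
            outputs=1)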
188,671 | from typing import List, Optional
import torch
from mmengine import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
get_post_processing_params,
pad_with_value_if_necessary)
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `rpn_head__predict_by_feat` function. Write a Python function `def rpn_head__predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True, **kwargs)` to solve the following problem:
Rewrite `predict_by_feat` of `RPNHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness
Here is the function:
def rpn_head__predict_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True,
**kwargs):
"""Rewrite `predict_by_feat` of `RPNHead` for default backend.
Rewrite this function to deploy model, transform network output for a
batch into bbox predictions.
Args:
ctx (ContextCaller): The context with additional information.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
score_factors (list[Tensor], optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Defaults to None.
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
If with_nms == True:
tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels),
`dets` of shape [N, num_det, 5] and `labels` of shape
[N, num_det].
Else:
tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes,
batch_mlvl_scores, batch_mlvl_centerness
"""
ctx = FUNCTION_REWRITER.get_context()
img_metas = batch_img_metas
assert len(cls_scores) == len(bbox_preds)
deploy_cfg = ctx.cfg
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
num_levels = len(cls_scores)
device = cls_scores[0].device
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
mlvl_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device=device)
mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
assert len(mlvl_cls_scores) == len(mlvl_bbox_preds) == len(mlvl_anchors)
cfg = self.test_cfg if cfg is None else cfg
batch_size = mlvl_cls_scores[0].shape[0]
pre_topk = cfg.get('nms_pre', -1)
# loop over features, decode boxes
mlvl_valid_bboxes = []
mlvl_scores = []
mlvl_valid_anchors = []
for level_id, cls_score, bbox_pred, anchors in zip(
range(num_levels), mlvl_cls_scores, mlvl_bbox_preds, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(0, 2, 3, 1)
if self.use_sigmoid_cls:
cls_score = cls_score.reshape(batch_size, -1)
scores = cls_score.sigmoid()
else:
cls_score = cls_score.reshape(batch_size, -1, 2)
# We set FG labels to [0, num_class-1] and BG label to
# num_class in RPN head since mmdet v2.5, which is unified to
# be consistent with other head since mmdet v2.0. In mmdet v2.0
# to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
scores = cls_score.softmax(-1)[..., 0]
scores = scores.reshape(batch_size, -1, 1)
dim = self.bbox_coder.encode_size
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, dim)
# use static anchor if input shape is static
if not is_dynamic_flag:
anchors = anchors.data
anchors = anchors.unsqueeze(0)
# topk in tensorrt does not support shape<k
# concate zero to enable topk,
scores = pad_with_value_if_necessary(scores, 1, pre_topk, 0.)
bbox_pred = pad_with_value_if_necessary(bbox_pred, 1, pre_topk)
anchors = pad_with_value_if_necessary(anchors, 1, pre_topk)
if pre_topk > 0:
_, topk_inds = scores.squeeze(2).topk(pre_topk)
bbox_pred, scores = gather_topk(
bbox_pred,
scores,
inds=topk_inds,
batch_size=batch_size,
is_batched=True)
anchors = gather_topk(
anchors,
inds=topk_inds,
batch_size=batch_size,
is_batched=False)
mlvl_valid_bboxes.append(bbox_pred)
mlvl_scores.append(scores)
mlvl_valid_anchors.append(anchors)
batch_mlvl_bboxes = torch.cat(mlvl_valid_bboxes, dim=1)
batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
batch_mlvl_anchors = torch.cat(mlvl_valid_anchors, dim=1)
batch_mlvl_bboxes = self.bbox_coder.decode(
batch_mlvl_anchors,
batch_mlvl_bboxes,
max_shape=img_metas[0]['img_shape'])
# ignore background class
if not self.use_sigmoid_cls:
batch_mlvl_scores = batch_mlvl_scores[..., :self.num_classes]
if not with_nms:
return batch_mlvl_bboxes, batch_mlvl_scores
post_params = get_post_processing_params(deploy_cfg)
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
# only one class in rpn
max_output_boxes_per_class = keep_top_k
nms_type = cfg.nms.get('type')
return multiclass_nms(
batch_mlvl_bboxes,
batch_mlvl_scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k) | Rewrite `predict_by_feat` of `RPNHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness |
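`pad_with_value_if_necessary` is applied before `topk` above because topk in TensorRT cannot handle inputs shorter than k. A standalone sketch of the padding idea; this is not the mmdeploy helper, just an illustration with toy tensors.

import torch

def pad_dim1_to(x, min_len, value=0.):
    """Pad dim 1 of `x` with `value` until it is at least `min_len` long."""
    pad_len = min_len - x.shape[1]
    if pad_len <= 0:
        return x
    pad_shape = list(x.shape)
    pad_shape[1] = pad_len
    return torch.cat([x, x.new_full(pad_shape, value)], dim=1)

scores = torch.rand(1, 3, 1)                 # only 3 priors at this level
scores = pad_dim1_to(scores, 5, 0.)          # zero-padded up to nms_pre=5
_, topk_inds = scores.squeeze(2).topk(5)     # topk(5) can no longer fail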
188,672 | from typing import List, Optional
import torch
from mmengine import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
get_post_processing_params,
pad_with_value_if_necessary)
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `rpn_head__get_bboxes__ncnn` function. Write a Python function `def rpn_head__get_bboxes__ncnn(self, cls_scores, bbox_preds, img_metas, with_nms=True, cfg=None, **kwargs)` to solve the following problem:
Rewrite `get_bboxes` of `RPNHead` for ncnn backend. Shape node and batch inference is not supported by ncnn. This function transform dynamic shape to constant shape and remove batch inference. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Box scores for each level in the feature pyramid, has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each level in the feature pyramid, has shape (N, num_anchors * 4, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. with_nms (bool): If True, do nms before return boxes. Default: True. cfg (mmengine.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores
Here is the function:
def rpn_head__get_bboxes__ncnn(self,
cls_scores,
bbox_preds,
img_metas,
with_nms=True,
cfg=None,
**kwargs):
"""Rewrite `get_bboxes` of `RPNHead` for ncnn backend.
    Shape nodes and batch inference are not supported by ncnn. This function
    transforms dynamic shapes to constant shapes and removes batch inference.
Args:
ctx (ContextCaller): The context with additional information.
cls_scores (list[Tensor]): Box scores for each level in the
feature pyramid, has shape
(N, num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each
level in the feature pyramid, has shape
(N, num_anchors * 4, H, W).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
with_nms (bool): If True, do nms before return boxes.
Default: True.
cfg (mmengine.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used.
Default: None.
Returns:
If with_nms == True:
tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels),
`dets` of shape [N, num_det, 5] and `labels` of shape
[N, num_det].
Else:
tuple[Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores
"""
ctx = FUNCTION_REWRITER.get_context()
assert len(cls_scores) == len(bbox_preds)
deploy_cfg = ctx.cfg
assert not is_dynamic_shape(deploy_cfg)
num_levels = len(cls_scores)
device = cls_scores[0].device
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
mlvl_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device=device)
mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
assert len(mlvl_cls_scores) == len(mlvl_bbox_preds) == len(mlvl_anchors)
cfg = self.test_cfg if cfg is None else cfg
batch_size = 1
pre_topk = cfg.get('nms_pre', -1)
# loop over features, decode boxes
mlvl_valid_bboxes = []
mlvl_scores = []
mlvl_valid_anchors = []
for level_id, cls_score, bbox_pred, anchors in zip(
range(num_levels), mlvl_cls_scores, mlvl_bbox_preds, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(0, 2, 3, 1)
if self.use_sigmoid_cls:
cls_score = cls_score.reshape(batch_size, -1)
scores = cls_score.sigmoid()
else:
cls_score = cls_score.reshape(batch_size, -1, 2)
# We set FG labels to [0, num_class-1] and BG label to
# num_class in RPN head since mmdet v2.5, which is unified to
# be consistent with other head since mmdet v2.0. In mmdet v2.0
# to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
scores = cls_score.softmax(-1)[..., 0]
scores = scores.reshape(batch_size, -1, 1)
dim = self.bbox_coder.encode_size
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, dim)
anchors = anchors.expand_as(bbox_pred).data
if pre_topk > 0:
_, topk_inds = scores.squeeze(2).topk(pre_topk)
topk_inds = topk_inds.view(-1)
anchors = anchors[:, topk_inds, :]
bbox_pred = bbox_pred[:, topk_inds, :]
scores = scores[:, topk_inds, :]
mlvl_valid_bboxes.append(bbox_pred)
mlvl_scores.append(scores)
mlvl_valid_anchors.append(anchors)
batch_mlvl_bboxes = torch.cat(mlvl_valid_bboxes, dim=1)
batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
batch_mlvl_anchors = torch.cat(mlvl_valid_anchors, dim=1)
batch_mlvl_bboxes = self.bbox_coder.decode(
batch_mlvl_anchors,
batch_mlvl_bboxes,
max_shape=img_metas[0]['img_shape'])
# ignore background class
if not self.use_sigmoid_cls:
batch_mlvl_scores = batch_mlvl_scores[..., :self.num_classes]
if not with_nms:
return batch_mlvl_bboxes, batch_mlvl_scores
post_params = get_post_processing_params(deploy_cfg)
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
# only one class in rpn
max_output_boxes_per_class = keep_top_k
nms_type = cfg.nms.get('type')
return multiclass_nms(
batch_mlvl_bboxes,
batch_mlvl_scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k) | Rewrite `get_bboxes` of `RPNHead` for ncnn backend. Shape node and batch inference is not supported by ncnn. This function transform dynamic shape to constant shape and remove batch inference. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Box scores for each level in the feature pyramid, has shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each level in the feature pyramid, has shape (N, num_anchors * 4, H, W). img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. with_nms (bool): If True, do nms before return boxes. Default: True. cfg (mmengine.Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores |
188,673 | from typing import Dict, List
import torch
import torch.nn.functional as F
from mmdet.models.layers.matrix_nms import mask_matrix_nms
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `solov2_head__predict_by_feat` function. Write a Python function `def solov2_head__predict_by_feat(self, mlvl_kernel_preds: List[Tensor], mlvl_cls_scores: List[Tensor], mask_feats: Tensor, batch_img_metas: List[Dict], **kwargs)` to solve the following problem:
Rewrite `predict_by_feat` of `SOLOV2Head` for default backend. Args: mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel prediction. The kernel is used to generate instance segmentation masks by dynamic convolution. Each element in the list has shape (batch_size, kernel_out_channels, num_grids, num_grids). mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids, num_grids). mask_feats (Tensor): Unified mask feature map used to generate instance segmentation masks by dynamic convolution. Has shape (batch_size, mask_out_channels, h, w). batch_img_metas (list[dict]): Meta information of all images. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w).
Here is the function:
def solov2_head__predict_by_feat(self, mlvl_kernel_preds: List[Tensor],
mlvl_cls_scores: List[Tensor],
mask_feats: Tensor,
batch_img_metas: List[Dict], **kwargs):
"""Rewrite `predict_by_feat` of `SOLOV2Head` for default backend.
Args:
mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel
prediction. The kernel is used to generate instance
segmentation masks by dynamic convolution. Each element in the
list has shape
(batch_size, kernel_out_channels, num_grids, num_grids).
mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element
in the list has shape
(batch_size, num_classes, num_grids, num_grids).
mask_feats (Tensor): Unified mask feature map used to generate
instance segmentation masks by dynamic convolution. Has shape
(batch_size, mask_out_channels, h, w).
batch_img_metas (list[dict]): Meta information of all images.
Returns:
list[:obj:`InstanceData`]: Processed results of multiple
images. Each :obj:`InstanceData` usually contains

following keys.
- scores (Tensor): Classification scores, has shape
(num_instance,).
- labels (Tensor): Has shape (num_instances,).
- masks (Tensor): Processed mask results, has
shape (num_instances, h, w).
"""
ctx = FUNCTION_REWRITER.get_context()
cfg = self.test_cfg
num_levels = len(mlvl_cls_scores)
batch_size = mlvl_cls_scores[0].size(0)
assert len(mlvl_kernel_preds) == len(mlvl_cls_scores)
for lvl in range(num_levels):
kernel_preds = mlvl_kernel_preds[lvl]
cls_scores = mlvl_cls_scores[lvl]
cls_scores = cls_scores.sigmoid()
local_max = F.max_pool2d(cls_scores, 2, stride=1, padding=1)
keep_mask = local_max[:, :, :-1, :-1] == cls_scores
cls_scores = cls_scores * keep_mask
mlvl_cls_scores[lvl] = cls_scores.permute(0, 2, 3, 1).view(
batch_size, -1, self.cls_out_channels)
mlvl_kernel_preds[lvl] = kernel_preds.permute(0, 2, 3, 1).view(
batch_size, -1, self.kernel_out_channels)
# Rewrite strides to avoid set_items.
mlvl_strides = [
torch.ones_like(mlvl_cls_scores[lvl][0, :, 0]) * self.strides[lvl]
for lvl in range(len(mlvl_cls_scores))
]
strides = torch.cat(mlvl_strides, 0)
assert len(mlvl_kernel_preds) == len(mlvl_cls_scores)
batch_mlvl_cls_scores = torch.cat(mlvl_cls_scores, dim=1)
batch_mlvl_kernel_preds = torch.cat(mlvl_kernel_preds, dim=1)
featmap_size = mask_feats.size()[-2:]
h, w = batch_img_metas[0]['img_shape'][:2]
batch_mlvl_cls_scores, cls_labels = torch.max(batch_mlvl_cls_scores, -1)
score_mask = (batch_mlvl_cls_scores > cfg.score_thr)
batch_mlvl_cls_scores = batch_mlvl_cls_scores.where(
score_mask, batch_mlvl_cls_scores.new_zeros(1)).view(-1)
cls_labels = cls_labels.view(-1)
# mask encoding.
kernel_preds = batch_mlvl_kernel_preds[0].unsqueeze(2).unsqueeze(3)
mask_preds = F.conv2d(
mask_feats, kernel_preds, stride=1).squeeze(0).sigmoid()
aligned_score_mask = score_mask[0].unsqueeze(1).unsqueeze(2)
mask_preds = mask_preds.where(aligned_score_mask, mask_preds.new_zeros(1))
# mask.
masks = (mask_preds > cfg.mask_thr)
sum_masks = masks.sum((1, 2))
keep = sum_masks > strides
cls_scores = batch_mlvl_cls_scores.where(
keep, batch_mlvl_cls_scores.new_zeros(1))
sum_masks = sum_masks.where(keep, sum_masks.new_ones(1))
# maskness.
mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks
cls_scores *= mask_scores
sum_masks = sum_masks.where(keep, sum_masks.new_zeros(1))
scores, labels, _, keep_inds = mask_matrix_nms(
masks,
cls_labels,
cls_scores,
mask_area=sum_masks,
nms_pre=cfg.nms_pre,
max_num=cfg.max_per_img,
kernel=cfg.kernel,
sigma=cfg.sigma,
filter_thr=cfg.filter_thr)
mask_preds = mask_preds[keep_inds].unsqueeze(0)
post_params = get_post_processing_params(ctx.cfg)
export_postprocess_mask = post_params.get('export_postprocess_mask', True)
if export_postprocess_mask:
upsampled_size = (featmap_size[0] * self.mask_stride,
featmap_size[1] * self.mask_stride)
mask_preds = F.interpolate(
mask_preds, size=upsampled_size, mode='bilinear')
bboxes = scores.new_zeros(batch_size, scores.shape[-1], 4)
else:
bboxes = scores.new_zeros(batch_size, scores.shape[-1], 2)
# full screen box so we can postprocess mask outside the model
bboxes = torch.cat([
bboxes,
bboxes.new_full((*bboxes.shape[:2], 1), w),
bboxes.new_full((*bboxes.shape[:2], 1), h)
],
dim=-1)
labels = labels.reshape(batch_size, -1)
dets = torch.cat([bboxes, scores.reshape(batch_size, -1, 1)], dim=-1)
return dets, labels, mask_preds | Rewrite `predict_by_feat` of `SOLOV2Head` for default backend. Args: mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel prediction. The kernel is used to generate instance segmentation masks by dynamic convolution. Each element in the list has shape (batch_size, kernel_out_channels, num_grids, num_grids). mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element in the list has shape (batch_size, num_classes, num_grids, num_grids). mask_feats (Tensor): Unified mask feature map used to generate instance segmentation masks by dynamic convolution. Has shape (batch_size, mask_out_channels, h, w). batch_img_metas (list[dict]): Meta information of all images. Returns: list[:obj:`InstanceData`]: Processed results of multiple images.Each :obj:`InstanceData` usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,). - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). |
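The `export_postprocess_mask` switch read above comes from the post-processing block of the deploy config. Below is a hedged sketch of what such a block might look like; the field names mirror those consumed by `get_post_processing_params` throughout this section, but the concrete values are illustrative only, not defaults:

# Hypothetical deploy-config fragment; values are illustrative, not defaults.
codebase_config = dict(
    type='mmdet',
    task='ObjectDetection',
    post_processing=dict(
        score_threshold=0.05,
        iou_threshold=0.5,
        max_output_boxes_per_class=200,
        pre_top_k=5000,
        keep_top_k=100,
        # When False, mask interpolation is left to the caller outside the model.
        export_postprocess_mask=False))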
188,674 | from typing import List, Optional
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend
The provided code snippet includes necessary dependencies for implementing the `yolox_head__predict_by_feat` function. Write a Python function `def yolox_head__predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], objectnesses: Optional[List[Tensor]], batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> List[InstanceData]` to solve the following problem:
Rewrite `predict_by_feat` of `YOLOXHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx: Context that contains original meta information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch size and the score between 0 and 1. The shape of the second tensor in the tuple is (N, num_box), and each element represents the class label of the corresponding box.
Here is the function:
def yolox_head__predict_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
objectnesses: Optional[List[Tensor]],
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True) -> List[InstanceData]:
"""Rewrite `predict_by_feat` of `YOLOXHead` for default backend.
Rewrite this function to deploy the model, transforming the network output for a
batch into bbox predictions.
Args:
ctx: Context that contains original meta information.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
objectnesses (list[Tensor], Optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, 1, H, W).
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor,
where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch
size and the score between 0 and 1. The shape of the second
tensor in the tuple is (N, num_box), and each element
represents the class label of the corresponding box.
"""
ctx = FUNCTION_REWRITER.get_context()
# mark pred_maps
@mark('yolo_head', inputs=['cls_scores', 'bbox_preds', 'objectnesses'])
def __mark_pred_maps(cls_scores, bbox_preds, objectnesses):
return cls_scores, bbox_preds, objectnesses
cls_scores, bbox_preds, objectnesses = __mark_pred_maps(
cls_scores, bbox_preds, objectnesses)
assert len(cls_scores) == len(bbox_preds) == len(objectnesses)
device = cls_scores[0].device
cfg = self.test_cfg if cfg is None else cfg
batch_size = bbox_preds[0].shape[0]
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
for bbox_pred in bbox_preds
]
flatten_objectness = [
objectness.permute(0, 2, 3, 1).reshape(batch_size, -1)
for objectness in objectnesses
]
cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
score_factor = torch.cat(flatten_objectness, dim=1).sigmoid()
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
flatten_priors = torch.cat(mlvl_priors)
bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds)
# directly multiply score factor and feed to nms
scores = cls_scores * (score_factor.unsqueeze(-1))
if not with_nms:
return bboxes, scores
deploy_cfg = ctx.cfg
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
nms_type = cfg.nms.get('type')
return multiclass_nms(
bboxes,
scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k) | Rewrite `predict_by_feat` of `YOLOXHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx: Context that contains original meta information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch size and the score between 0 and 1. The shape of the second tensor in the tuple is (N, num_box), and each element represents the class label of the corresponding box. |
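The call to `self._bbox_decode` above converts per-prior regression outputs into corner boxes. A short paraphrase of that decoding, assuming priors laid out as (cx, cy, stride_w, stride_h) because the grid is generated with `with_stride=True`; treat it as a sketch, not the upstream implementation:

import torch


def yolox_bbox_decode_sketch(priors: torch.Tensor,
                             bbox_preds: torch.Tensor) -> torch.Tensor:
    # Offsets are scaled by the stride and added to the prior centre; widths
    # and heights are exponentiated and scaled by the stride.
    xys = bbox_preds[..., :2] * priors[..., 2:] + priors[..., :2]
    whs = bbox_preds[..., 2:].exp() * priors[..., 2:]
    half = whs / 2
    return torch.cat([xys - half, xys + half], dim=-1)  # (tl_x, tl_y, br_x, br_y)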
188,675 | from typing import List, Optional
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend
def is_dynamic_shape(deploy_cfg: Union[str, mmengine.Config],
input_name: Optional[str] = None) -> bool:
"""Check if input shape is dynamic.
Args:
deploy_cfg (str | mmengine.Config): The path or content of config.
input_name (Optional[str]): The name of input in onnx export parameter.
Returns:
bool: Is config set dynamic shape (axis 2 and 3).
"""
# Always dynamic for exporting torchscript
if get_backend(deploy_cfg) == Backend.TORCHSCRIPT:
return True
deploy_cfg = load_config(deploy_cfg)[0]
ir_config = get_ir_config(deploy_cfg)
# check if input name is in the config
input_names = ir_config.get('input_names', None)
if input_name is None:
input_name = input_names[0] if input_names else 'input'
# check if dynamic axes exist
# TODO: update this when we have more IR
dynamic_axes = get_dynamic_axes(deploy_cfg)
if dynamic_axes is None:
return False
# check if given input name exist
input_axes = dynamic_axes.get(input_name, None)
if input_axes is None:
return False
# check if 2 (height) and 3 (width) in input axes
if 2 in input_axes or 3 in input_axes:
return True
return False
The provided code snippet includes necessary dependencies for implementing the `yolox_head__predict_by_feat__ncnn` function. Write a Python function `def yolox_head__predict_by_feat__ncnn( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], objectnesses: Optional[List[Tensor]], batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True)` to solve the following problem:
Rewrite `predict_by_feat` of YOLOXHead for ncnn backend. 1. Decode the prior to a box format for ncnn DetectionOutput layer to do the post-processing. 2. Batch dimension is not supported by ncnn, but supported by pytorch. The negative value of axis in torch.cat is rewritten as corresponding positive value to avoid axis shift. 3. 2-dimension tensor broadcast of `BinaryOps` operator is not supported by ncnn. This function unsqueeze 2-dimension tensor to 3-dimension tensor for correct `BinaryOps` calculation by ncnn. Args: ctx: Context that contains original meta information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: output__ncnn (Tensor): outputs, shape is [N, num_det, 6].
Here is the function:
def yolox_head__predict_by_feat__ncnn(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
objectnesses: Optional[List[Tensor]],
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True):
"""Rewrite `predict_by_feat` of YOLOXHead for ncnn backend.
1. Decode the prior to a box format for ncnn DetectionOutput layer to do
the post-processing.
2. Batch dimension is not supported by ncnn, but supported by pytorch.
The negative value of axis in torch.cat is rewritten as corresponding
positive value to avoid axis shift.
3. 2-dimension tensor broadcast of `BinaryOps` operator is not supported by
ncnn. This function unsqueezes 2-dimension tensors to 3-dimension tensors for
correct `BinaryOps` calculation by ncnn.
Args:
ctx: Context that contains original meta information.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
objectnesses (list[Tensor], Optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, 1, H, W).
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
output__ncnn (Tensor): outputs, shape is [N, num_det, 6].
"""
ctx = FUNCTION_REWRITER.get_context()
from mmdeploy.codebase.mmdet.ops import ncnn_detection_output_forward
from mmdeploy.utils import get_root_logger
from mmdeploy.utils.config_utils import is_dynamic_shape
dynamic_flag = is_dynamic_shape(ctx.cfg)
if dynamic_flag:
logger = get_root_logger()
logger.warning('YOLOX does not support dynamic shape with ncnn.')
img_height = int(batch_img_metas[0]['img_shape'][0])
img_width = int(batch_img_metas[0]['img_shape'][1])
assert len(cls_scores) == len(bbox_preds) == len(objectnesses)
device = cls_scores[0].device
cfg = self.test_cfg if cfg is None else cfg
batch_size = bbox_preds[0].shape[0]
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
mlvl_priors = [mlvl_prior.unsqueeze(0) for mlvl_prior in mlvl_priors]
flatten_priors = torch.cat(mlvl_priors, dim=1)
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
for bbox_pred in bbox_preds
]
flatten_objectness = [
objectness.permute(0, 2, 3, 1).reshape(batch_size, -1, 1)
for objectness in objectnesses
]
cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
dummy_cls_scores = torch.zeros(
batch_size, cls_scores.shape[-2], 1, device=cls_scores.device)
batch_mlvl_scores = torch.cat([dummy_cls_scores, cls_scores], dim=2)
score_factor = torch.cat(flatten_objectness, dim=1).sigmoid()
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
assert flatten_priors.shape[-1] == 4, f'yolox needs (B, N, 4) priors, got\
(B, N, {flatten_priors.shape[-1]})'
prior_box_x1 = (flatten_priors[:, :, 0:1] - flatten_priors[:, :, 2:3] / 2)\
/ img_width
prior_box_y1 = (flatten_priors[:, :, 1:2] - flatten_priors[:, :, 3:4] / 2)\
/ img_height
prior_box_x2 = (flatten_priors[:, :, 0:1] + flatten_priors[:, :, 2:3] / 2)\
/ img_width
prior_box_y2 = (flatten_priors[:, :, 1:2] + flatten_priors[:, :, 3:4] / 2)\
/ img_height
prior_box_ncnn = torch.cat(
[prior_box_x1, prior_box_y1, prior_box_x2, prior_box_y2], dim=2)
scores = batch_mlvl_scores.permute(0, 2, 1).unsqueeze(3) * \
score_factor.permute(0, 2, 1).unsqueeze(3)
scores = scores.squeeze(3).permute(0, 2, 1)
batch_mlvl_bboxes = flatten_bbox_preds.reshape(batch_size, 1, -1)
batch_mlvl_scores = scores.reshape(batch_size, 1, -1)
batch_mlvl_priors = prior_box_ncnn.reshape(batch_size, 1, -1)
batch_mlvl_vars = torch.ones_like(batch_mlvl_priors)
batch_mlvl_priors = torch.cat([batch_mlvl_priors, batch_mlvl_vars], dim=1)
deploy_cfg = ctx.cfg
post_params = get_post_processing_params(deploy_cfg)
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
vars = torch.tensor([1, 1, 1, 1], dtype=torch.float32)
output__ncnn = ncnn_detection_output_forward(
batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_priors,
score_threshold, iou_threshold, pre_top_k, keep_top_k,
self.num_classes + 1,
vars.cpu().detach().numpy())
return output__ncnn | Rewrite `predict_by_feat` of YOLOXHead for ncnn backend. 1. Decode the prior to a box format for ncnn DetectionOutput layer to do the post-processing. 2. Batch dimension is not supported by ncnn, but supported by pytorch. The negative value of axis in torch.cat is rewritten as corresponding positive value to avoid axis shift. 3. 2-dimension tensor broadcast of `BinaryOps` operator is not supported by ncnn. This function unsqueeze 2-dimension tensor to 3-dimension tensor for correct `BinaryOps` calculation by ncnn. Args: ctx: Context that contains original meta information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: output__ncnn (Tensor): outputs, shape is [N, num_det, 6]. |
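As a quick numeric check of the prior conversion performed above (centre format turned into normalized corners), using purely illustrative values:

# A prior with centre (32, 32) and extent (8, 8) on a 640 x 640 input maps to
# normalized corners (0.04375, 0.04375, 0.05625, 0.05625).
cx, cy, w, h = 32.0, 32.0, 8.0, 8.0
img_w = img_h = 640.0
prior_box = ((cx - w / 2) / img_w, (cy - h / 2) / img_h,
             (cx + w / 2) / img_w, (cy + h / 2) / img_h)
assert prior_box == (0.04375, 0.04375, 0.05625, 0.05625)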
188,676 | from typing import Dict, List
import torch
from mmdet.models.layers import mask_matrix_nms
from mmdet.utils import OptConfigType
from torch import Tensor
from torch.nn import functional as F
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `solohead__predict_by_feat` function. Write a Python function `def solohead__predict_by_feat(self, mlvl_mask_preds: List[Tensor], mlvl_cls_scores: List[Tensor], batch_img_metas: List[Dict], cfg: OptConfigType = None, **kwargs)` to solve the following problem:
Rewrite `predict_by_feat` of `SOLOHead` for default backend.
Here is the function:
def solohead__predict_by_feat(self,
mlvl_mask_preds: List[Tensor],
mlvl_cls_scores: List[Tensor],
batch_img_metas: List[Dict],
cfg: OptConfigType = None,
**kwargs):
"""Rewrite `predict_by_feat` of `SOLOHead` for default backend."""
ctx = FUNCTION_REWRITER.get_context()
batch_size = mlvl_cls_scores[0].size(0)
cfg = self.test_cfg
mlvl_cls_scores = [
item.permute(0, 2, 3, 1).view(item.size(0), -1, self.cls_out_channels)
for item in mlvl_cls_scores
]
# avoid setting items
lvl_strides = [
torch.ones_like(mlvl_cls_scores[lvl][0, :, 0]) * self.strides[lvl]
for lvl in range(len(mlvl_cls_scores))
]
strides = torch.cat(lvl_strides, 0)
assert len(mlvl_mask_preds) == len(mlvl_cls_scores)
batch_mlvl_cls_scores = torch.cat(mlvl_cls_scores, dim=1)
batch_mlvl_mask_preds = torch.cat(mlvl_mask_preds, dim=1)
featmap_size = batch_mlvl_mask_preds.size()[-2:]
batch_mlvl_cls_scores, cls_labels = torch.max(batch_mlvl_cls_scores, -1)
score_mask = (batch_mlvl_cls_scores > cfg.score_thr)
# pad zero to filter items
batch_mlvl_cls_scores = batch_mlvl_cls_scores.where(
score_mask, batch_mlvl_cls_scores.new_zeros(1)).view(-1)
cls_labels = cls_labels.view(-1)
mask_preds = batch_mlvl_mask_preds.view(-1, featmap_size[0],
featmap_size[1])
masks = (mask_preds > cfg.mask_thr)
sum_masks = masks.sum((1, 2))
keep = sum_masks > strides
# pad zero to filter items
cls_scores = batch_mlvl_cls_scores.where(
keep, batch_mlvl_cls_scores.new_zeros(1))
sum_masks = sum_masks.where(keep, sum_masks.new_ones(1))
# maskness
mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks
cls_scores *= mask_scores
sum_masks = sum_masks.where(keep, sum_masks.new_zeros(1))
scores, labels, _, keep_inds = mask_matrix_nms(
masks,
cls_labels,
cls_scores,
mask_area=sum_masks,
nms_pre=cfg.nms_pre,
max_num=cfg.max_per_img,
kernel=cfg.kernel,
sigma=cfg.sigma,
filter_thr=cfg.filter_thr)
h, w = batch_img_metas[0]['img_shape'][:2]
mask_preds = mask_preds[keep_inds].unsqueeze(0)
mmdet_params = get_post_processing_params(ctx.cfg)
export_postprocess_mask = mmdet_params.get('export_postprocess_mask', True)
if export_postprocess_mask:
upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4)
mask_preds = F.interpolate(
mask_preds, size=upsampled_size, mode='bilinear')
bboxes = scores.new_zeros(batch_size, scores.shape[-1], 4)
else:
bboxes = scores.new_zeros(batch_size, scores.shape[-1], 2)
# full screen box so we can postprocess mask outside the model
bboxes = torch.cat([
bboxes,
bboxes.new_full((*bboxes.shape[:2], 1), w),
bboxes.new_full((*bboxes.shape[:2], 1), h)
],
dim=-1)
labels = labels.reshape(batch_size, -1)
dets = torch.cat([bboxes, scores.reshape(batch_size, -1, 1)], dim=-1)
return dets, labels, mask_preds | Rewrite `predict_by_feat` of `SOLOHead` for default backend. |
188,677 | from typing import List, Optional
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend
The provided code snippet includes necessary dependencies for implementing the `rtmdet_head__predict_by_feat` function. Write a Python function `def rtmdet_head__predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> List[InstanceData]` to solve the following problem:
Rewrite `predict_by_feat` of `RTMDet` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx: Context that contains original meta information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch size and the score between 0 and 1. The shape of the second tensor in the tuple is (N, num_box), and each element represents the class label of the corresponding box.
Here is the function:
def rtmdet_head__predict_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True) -> List[InstanceData]:
"""Rewrite `predict_by_feat` of `RTMDet` for default backend.
Rewrite this function to deploy the model, transforming the network output for a
batch into bbox predictions.
Args:
ctx: Context that contains original meta information.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor,
where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch
size and the score between 0 and 1. The shape of the second
tensor in the tuple is (N, num_box), and each element
represents the class label of the corresponding box.
"""
@mark('rtmdet_head', inputs=['cls_scores', 'bbox_preds'])
def __mark_pred_maps(cls_scores, bbox_preds):
return cls_scores, bbox_preds
cls_scores, bbox_preds = __mark_pred_maps(cls_scores, bbox_preds)
ctx = FUNCTION_REWRITER.get_context()
assert len(cls_scores) == len(bbox_preds)
device = cls_scores[0].device
cfg = self.test_cfg if cfg is None else cfg
batch_size = bbox_preds[0].shape[0]
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, device=device)
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
for bbox_pred in bbox_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
priors = torch.cat(mlvl_priors)
tl_x = (priors[..., 0] - flatten_bbox_preds[..., 0])
tl_y = (priors[..., 1] - flatten_bbox_preds[..., 1])
br_x = (priors[..., 0] + flatten_bbox_preds[..., 2])
br_y = (priors[..., 1] + flatten_bbox_preds[..., 3])
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)
scores = flatten_cls_scores
if not with_nms:
return bboxes, scores
deploy_cfg = ctx.cfg
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
nms_type = cfg.nms.get('type')
return multiclass_nms(
bboxes,
scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k) | Rewrite `predict_by_feat` of `RTMDet` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx: Context that contains original meta information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch size and the score between 0 and 1. The shape of the second tensor in the tuple is (N, num_box), and each element represents the class label of the corresponding box. |
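The four stacked terms above implement distance-to-box decoding: each prior centre is offset by the predicted left/top/right/bottom distances. A tiny illustrative check of that arithmetic:

import torch

# Prior centre (100, 60) with predicted distances (l, t, r, b) = (10, 5, 20, 15)
# decodes to the box (90, 55, 120, 75).
priors = torch.tensor([[100.0, 60.0]])
dists = torch.tensor([[10.0, 5.0, 20.0, 15.0]])
boxes = torch.stack([priors[..., 0] - dists[..., 0],
                     priors[..., 1] - dists[..., 1],
                     priors[..., 0] + dists[..., 2],
                     priors[..., 1] + dists[..., 3]], dim=-1)
assert boxes.tolist() == [[90.0, 55.0, 120.0, 75.0]]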
188,678 | from typing import List, Optional
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend
def is_dynamic_shape(deploy_cfg: Union[str, mmengine.Config],
input_name: Optional[str] = None) -> bool:
"""Check if input shape is dynamic.
Args:
deploy_cfg (str | mmengine.Config): The path or content of config.
input_name (Optional[str]): The name of input in onnx export parameter.
Returns:
bool: Is config set dynamic shape (axis 2 and 3).
"""
# Always dynamic for exporting torchscript
if get_backend(deploy_cfg) == Backend.TORCHSCRIPT:
return True
deploy_cfg = load_config(deploy_cfg)[0]
ir_config = get_ir_config(deploy_cfg)
# check if input name is in the config
input_names = ir_config.get('input_names', None)
if input_name is None:
input_name = input_names[0] if input_names else 'input'
# check if dynamic axes exist
# TODO: update this when we have more IR
dynamic_axes = get_dynamic_axes(deploy_cfg)
if dynamic_axes is None:
return False
# check if given input name exist
input_axes = dynamic_axes.get(input_name, None)
if input_axes is None:
return False
# check if 2 (height) and 3 (width) in input axes
if 2 in input_axes or 3 in input_axes:
return True
return False
The provided code snippet includes necessary dependencies for implementing the `rtmdet_head__predict_by_feat__ncnn` function. Write a Python function `def rtmdet_head__predict_by_feat__ncnn( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True)` to solve the following problem:
Rewrite `predict_by_feat` of RTMDetHead for ncnn backend. 1. Decode the prior to a box format for ncnn DetectionOutput layer to do the post-processing. 2. Batch dimension is not supported by ncnn, but supported by pytorch. The negative value of axis in torch.cat is rewritten as corresponding positive value to avoid axis shift. 3. 2-dimension tensor broadcast of `BinaryOps` operator is not supported by ncnn. This function unsqueeze 2-dimension tensor to 3-dimension tensor for correct `BinaryOps` calculation by ncnn. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: output__ncnn (Tensor): outputs, shape is [N, num_det, 6].
Here is the function:
def rtmdet_head__predict_by_feat__ncnn(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True):
"""Rewrite `predict_by_feat` of RTMDetHead for ncnn backend.
1. Decode the prior to a box format for ncnn DetectionOutput layer to do
the post-processing.
2. Batch dimension is not supported by ncnn, but supported by pytorch.
The negative value of axis in torch.cat is rewritten as corresponding
positive value to avoid axis shift.
3. 2-dimension tensor broadcast of `BinaryOps` operator is not supported by
ncnn. This function unsqueezes 2-dimension tensors to 3-dimension tensors for
correct `BinaryOps` calculation by ncnn.
Args:
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
objectnesses (list[Tensor], Optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, 1, H, W).
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
output__ncnn (Tensor): outputs, shape is [N, num_det, 6].
"""
ctx = FUNCTION_REWRITER.get_context()
from mmdeploy.codebase.mmdet.ops import ncnn_detection_output_forward
from mmdeploy.utils import get_root_logger
from mmdeploy.utils.config_utils import is_dynamic_shape
dynamic_flag = is_dynamic_shape(ctx.cfg)
if dynamic_flag:
logger = get_root_logger()
logger.warning('RTMDet does not support dynamic shape with ncnn.')
img_height = int(batch_img_metas[0]['img_shape'][0])
img_width = int(batch_img_metas[0]['img_shape'][1])
assert len(cls_scores) == len(bbox_preds)
device = cls_scores[0].device
cfg = self.test_cfg if cfg is None else cfg
batch_size = bbox_preds[0].shape[0]
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
mlvl_priors = [mlvl_prior.unsqueeze(0) for mlvl_prior in mlvl_priors]
flatten_priors = torch.cat(mlvl_priors, dim=1)
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
for bbox_pred in bbox_preds
]
cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
dummy_cls_scores = torch.zeros(
batch_size, cls_scores.shape[-2], 1, device=cls_scores.device)
batch_mlvl_scores = torch.cat([dummy_cls_scores, cls_scores], dim=2)
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
assert flatten_priors.shape[-1] == 4, f'rtmdet needs (B, N, 4) priors, got\
(B, N, {flatten_priors.shape[-1]})'
tl_x = (flatten_priors[:, :, 0:1] -
flatten_bbox_preds[:, :, 0:1]) / img_width
tl_y = (flatten_priors[:, :, 1:2] -
flatten_bbox_preds[:, :, 1:2]) / img_height
br_x = (flatten_priors[:, :, 0:1] +
flatten_bbox_preds[:, :, 2:3]) / img_width
br_y = (flatten_priors[:, :, 1:2] +
flatten_bbox_preds[:, :, 3:4]) / img_height
prior_box_ncnn = torch.stack([tl_x, tl_y, br_x, br_y], -1)
scores = batch_mlvl_scores
batch_mlvl_bboxes = flatten_bbox_preds.reshape(batch_size, 1, -1)
batch_mlvl_scores = scores.reshape(batch_size, 1, -1)
batch_mlvl_priors = prior_box_ncnn.reshape(batch_size, 1, -1)
batch_mlvl_vars = torch.ones_like(batch_mlvl_priors)
batch_mlvl_priors = torch.cat([batch_mlvl_priors, batch_mlvl_vars], dim=1)
deploy_cfg = ctx.cfg
post_params = get_post_processing_params(deploy_cfg)
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
vars = torch.tensor([1, 1, 1, 1], dtype=torch.float32)
output__ncnn = ncnn_detection_output_forward(
batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_priors,
score_threshold, iou_threshold, pre_top_k, keep_top_k,
self.num_classes + 1,
vars.cpu().detach().numpy())
return output__ncnn | Rewrite `predict_by_feat` of RTMDetHead for ncnn backend. 1. Decode the prior to a box format for ncnn DetectionOutput layer to do the post-processing. 2. Batch dimension is not supported by ncnn, but supported by pytorch. The negative value of axis in torch.cat is rewritten as corresponding positive value to avoid axis shift. 3. 2-dimension tensor broadcast of `BinaryOps` operator is not supported by ncnn. This function unsqueeze 2-dimension tensor to 3-dimension tensor for correct `BinaryOps` calculation by ncnn. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). objectnesses (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, 1, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: output__ncnn (Tensor): outputs, shape is [N, num_det, 6]. |
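The zero column concatenated in front of the class scores above, together with passing `self.num_classes + 1`, suggests that the ncnn DetectionOutput layer reserves class index 0 for background; a small shape sketch of that step:

import torch

# Scores (batch, priors, classes) gain a leading zero "background" column,
# becoming (batch, priors, classes + 1) before DetectionOutput.
cls_scores = torch.rand(1, 8, 3)
dummy = torch.zeros(1, 8, 1)
scores_with_bg = torch.cat([dummy, cls_scores], dim=2)
assert scores_with_bg.shape == (1, 8, 4)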
188,679 | from typing import List, Optional
import torch
import torch.nn.functional as F
from mmengine.config import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops.nms import multiclass_nms
def _nms_with_mask_static(self,
priors: Tensor,
bboxes: Tensor,
scores: Tensor,
kernels: Tensor,
mask_feats: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1,
mask_thr_binary: float = 0.5):
"""Wrapper for `multiclass_nms` with ONNXRuntime.
Args:
ctx (ContextCaller): The context with additional information.
bboxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
scores (Tensor): The detection scores of shape
[N, num_boxes, num_classes].
max_output_boxes_per_class (int): Maximum number of output
boxes per class of nms. Defaults to 1000.
iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
score_threshold (float): score threshold of nms.
Defaults to 0.05.
pre_top_k (int): Number of top K boxes to keep before nms.
Defaults to -1.
keep_top_k (int): Number of top K boxes to keep after nms.
Defaults to -1.
Returns:
tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5]
and `labels` of shape [N, num_det].
"""
dets, labels, inds = multiclass_nms(
bboxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k,
output_index=True)
batch_size = bboxes.shape[0]
batch_inds = torch.arange(batch_size, device=bboxes.device).view(-1, 1)
kernels = kernels[batch_inds, inds, :]
priors = priors.unsqueeze(0).repeat(batch_size, 1, 1)
priors = priors[batch_inds, inds, :]
mask_logits = _mask_predict_by_feat_single(self, mask_feats, kernels,
priors)
stride = self.prior_generator.strides[0][0]
mask_logits = F.interpolate(
mask_logits, scale_factor=stride, mode='bilinear')
masks = mask_logits.sigmoid()
return dets, labels, masks
The provided code snippet includes necessary dependencies for implementing the `rtmdet_ins_head__predict_by_feat` function. Write a Python function `def rtmdet_ins_head__predict_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], kernel_preds: List[Tensor], mask_feat: Tensor, score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True)` to solve the following problem:
Rewrite `predict_by_feat` of `RTMDet-Ins` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx: Context that contains original meta information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch size and the score between 0 and 1. The shape of the second tensor in the tuple is (N, num_box), and each element represents the class label of the corresponding box.
Here is the function:
def rtmdet_ins_head__predict_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
kernel_preds: List[Tensor],
mask_feat: Tensor,
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True):
"""Rewrite `predict_by_feat` of `RTMDet-Ins` for default backend.
Rewrite this function to deploy the model, transforming the network output for a
batch into bbox predictions.
Args:
ctx: Context that contains original meta information.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor,
where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch
size and the score between 0 and 1. The shape of the second
tensor in the tuple is (N, num_box), and each element
represents the class label of the corresponding box.
"""
assert len(cls_scores) == len(bbox_preds)
device = cls_scores[0].device
cfg = self.test_cfg if cfg is None else cfg
batch_size = bbox_preds[0].shape[0]
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, device=device, with_stride=True)
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
for bbox_pred in bbox_preds
]
flatten_kernel_preds = [
kernel_pred.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.num_gen_params)
for kernel_pred in kernel_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
flatten_kernel_preds = torch.cat(flatten_kernel_preds, dim=1)
priors = torch.cat(mlvl_priors)
tl_x = (priors[..., 0] - flatten_bbox_preds[..., 0])
tl_y = (priors[..., 1] - flatten_bbox_preds[..., 1])
br_x = (priors[..., 0] + flatten_bbox_preds[..., 2])
br_y = (priors[..., 1] + flatten_bbox_preds[..., 3])
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)
scores = flatten_cls_scores
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
mask_thr_binary = cfg.get('mask_thr_binary', 0.5)
return _nms_with_mask_static(self, priors, bboxes, scores,
flatten_kernel_preds, mask_feat,
max_output_boxes_per_class, iou_threshold,
score_threshold, pre_top_k, keep_top_k,
mask_thr_binary) | Rewrite `predict_by_feat` of `RTMDet-Ins` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx: Context that contains original meta information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor, where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch size and the score between 0 and 1. The shape of the second tensor in the tuple is (N, num_box), and each element represents the class label of the corresponding box. |
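The mask branch above relies on `multiclass_nms(..., output_index=True)` returning the indices of the kept boxes, which `_nms_with_mask_static` then uses to gather the matching dynamic-convolution kernels and priors. A small sketch of that advanced-indexing pattern with made-up shapes:

import torch

batch_size, num_priors, num_params = 2, 10, 8
kernels = torch.rand(batch_size, num_priors, num_params)
inds = torch.tensor([[1, 4, 7], [0, 2, 9]])        # kept indices per image
batch_inds = torch.arange(batch_size).view(-1, 1)  # broadcasts against inds
kept_kernels = kernels[batch_inds, inds, :]        # shape (2, 3, 8)
assert kept_kernels.shape == (2, 3, num_params)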
188,680 | from typing import List, Optional
import torch
from mmdet.models.dense_heads import PAAHead
from mmdet.models.task_modules.coders import (DeltaXYWHBBoxCoder,
DistancePointBBoxCoder,
TBLRBBoxCoder)
from mmdet.structures.bbox import BaseBoxes, get_box_tensor
from mmdet.structures.bbox.transforms import distance2bbox
from mmengine import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
get_post_processing_params,
pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.ops import ncnn_detection_output_forward
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `base_dense_head__predict_by_feat` function. Write a Python function `def base_dense_head__predict_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True, **kwargs)` to solve the following problem:
Rewrite `predict_by_feat` of `BaseDenseHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness
Here is the function:
def base_dense_head__predict_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True,
**kwargs):
"""Rewrite `predict_by_feat` of `BaseDenseHead` for default backend.
Rewrite this function to deploy the model, transforming the network output for a
batch into bbox predictions.
Args:
ctx (ContextCaller): The context with additional information.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
score_factors (list[Tensor], optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Defaults to None.
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
If with_nms == True:
tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels),
`dets` of shape [N, num_det, 5] and `labels` of shape
[N, num_det].
Else:
tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes,
batch_mlvl_scores, batch_mlvl_centerness
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device)
# anchor could be subclass of BaseBoxes in mmrotate
prior_type = type(mlvl_priors[0])
mlvl_priors = [get_box_tensor(priors) for priors in mlvl_priors]
mlvl_priors = [priors.unsqueeze(0) for priors in mlvl_priors]
mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
if score_factors is None:
with_score_factors = False
mlvl_score_factor = [None for _ in range(num_levels)]
else:
with_score_factors = True
mlvl_score_factor = [
score_factors[i].detach() for i in range(num_levels)
]
mlvl_score_factors = []
assert batch_img_metas is not None
img_shape = batch_img_metas[0]['img_shape']
assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors)
batch_size = cls_scores[0].shape[0]
cfg = self.test_cfg
pre_topk = cfg.get('nms_pre', -1)
mlvl_valid_bboxes = []
mlvl_valid_scores = []
mlvl_valid_priors = []
for cls_score, bbox_pred, score_factors, priors in zip(
mlvl_cls_scores, mlvl_bbox_preds, mlvl_score_factor, mlvl_priors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
if self.use_sigmoid_cls:
scores = scores.sigmoid()
else:
scores = scores.softmax(-1)[:, :, :-1]
if with_score_factors:
score_factors = score_factors.permute(0, 2, 3,
1).reshape(batch_size,
-1).sigmoid()
score_factors = score_factors.unsqueeze(2)
dim = self.bbox_coder.encode_size
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, dim)
if not is_dynamic_flag:
priors = priors.data
if pre_topk > 0:
priors = pad_with_value_if_necessary(priors, 1, pre_topk)
bbox_pred = pad_with_value_if_necessary(bbox_pred, 1, pre_topk)
scores = pad_with_value_if_necessary(scores, 1, pre_topk, 0.)
if with_score_factors:
score_factors = pad_with_value_if_necessary(
score_factors, 1, pre_topk, 0.)
nms_pre_score = scores
if with_score_factors:
nms_pre_score = nms_pre_score * score_factors
if isinstance(self, PAAHead):
nms_pre_score = nms_pre_score.sqrt()
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores, _ = nms_pre_score.max(-1)
else:
max_scores, _ = nms_pre_score[..., :-1].max(-1)
_, topk_inds = max_scores.topk(pre_topk)
bbox_pred, scores, score_factors = gather_topk(
bbox_pred,
scores,
score_factors,
inds=topk_inds,
batch_size=batch_size,
is_batched=True)
priors = gather_topk(
priors,
inds=topk_inds,
batch_size=batch_size,
is_batched=False)
mlvl_valid_bboxes.append(bbox_pred)
mlvl_valid_scores.append(scores)
mlvl_valid_priors.append(priors)
if with_score_factors:
mlvl_score_factors.append(score_factors)
batch_mlvl_bboxes_pred = torch.cat(mlvl_valid_bboxes, dim=1)
batch_scores = torch.cat(mlvl_valid_scores, dim=1)
batch_priors = torch.cat(mlvl_valid_priors, dim=1)
if issubclass(prior_type, BaseBoxes):
batch_priors = prior_type(batch_priors, clone=False)
batch_bboxes = self.bbox_coder.decode(
batch_priors, batch_mlvl_bboxes_pred, max_shape=img_shape)
batch_bboxes = get_box_tensor(batch_bboxes)
if with_score_factors:
batch_score_factors = torch.cat(mlvl_score_factors, dim=1)
if not self.use_sigmoid_cls:
batch_scores = batch_scores[..., :self.num_classes]
if with_score_factors:
batch_scores = batch_scores * batch_score_factors
if isinstance(self, PAAHead):
batch_scores = batch_scores.sqrt()
if not with_nms:
return batch_bboxes, batch_scores
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
nms_type = cfg.nms.get('type')
return multiclass_nms(
batch_bboxes,
batch_scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k) | Rewrite `predict_by_feat` of `BaseDenseHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness |
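The NMS branch above pulls its thresholds from `get_post_processing_params(deploy_cfg)` and lets `test_cfg` override them. Below is a minimal sketch of the `post_processing` block such a deploy config might carry; the key names follow the usual mmdeploy detection configs, but the numbers are purely illustrative, not recommendations.
# Illustrative deploy-config fragment; values are examples only.
codebase_config = dict(
    type='mmdet',
    task='ObjectDetection',
    post_processing=dict(
        score_threshold=0.05,
        iou_threshold=0.5,
        max_output_boxes_per_class=200,
        pre_top_k=5000,
        keep_top_k=100,
        background_label_id=-1))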
188,681 | from typing import List, Optional
import torch
from mmdet.models.dense_heads import PAAHead
from mmdet.models.task_modules.coders import (DeltaXYWHBBoxCoder,
DistancePointBBoxCoder,
TBLRBBoxCoder)
from mmdet.structures.bbox import BaseBoxes, get_box_tensor
from mmdet.structures.bbox.transforms import distance2bbox
from mmengine import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
get_post_processing_params,
pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.ops import ncnn_detection_output_forward
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `base_dense_head__predict_by_feat__rknn` function. Write a Python function `def base_dense_head__predict_by_feat__rknn( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True, **kwargs)` to solve the following problem:
Rewrite `predict_by_feat` of `BaseDenseHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness
Here is the function:
def base_dense_head__predict_by_feat__rknn(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True,
**kwargs):
"""Rewrite `predict_by_feat` of `BaseDenseHead` for default backend.
    Rewrite this function to deploy the model, transforming the network output
    for a batch into bbox predictions.
Args:
ctx (ContextCaller): The context with additional information.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
score_factors (list[Tensor], optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Defaults to None.
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
If with_nms == True:
            tuple[Tensor, Tensor]: (dets, labels),
`dets` of shape [N, num_det, 5] and `labels` of shape
[N, num_det].
Else:
tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes,
batch_mlvl_scores, batch_mlvl_centerness
"""
ctx = FUNCTION_REWRITER.get_context()
# mark nodes for partition
@mark('BaseDenseHead', outputs=['BaseDenseHead.cls', 'BaseDenseHead.loc'])
def __mark_dense_head(cls_scores, bbox_preds):
return cls_scores, bbox_preds
cls_scores, bbox_preds = __mark_dense_head(cls_scores, bbox_preds)
deploy_cfg = ctx.cfg
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device)
mlvl_priors = [priors.unsqueeze(0) for priors in mlvl_priors]
mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
if score_factors is None:
with_score_factors = False
mlvl_score_factor = [None for _ in range(num_levels)]
else:
with_score_factors = True
mlvl_score_factor = [
score_factors[i].detach() for i in range(num_levels)
]
mlvl_score_factors = []
assert batch_img_metas is not None
img_shape = batch_img_metas[0]['img_shape']
assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors)
batch_size = cls_scores[0].shape[0]
mlvl_valid_bboxes = []
mlvl_valid_scores = []
mlvl_valid_priors = []
for cls_score, bbox_pred, score_factors, priors in zip(
mlvl_cls_scores, mlvl_bbox_preds, mlvl_score_factor, mlvl_priors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
if self.use_sigmoid_cls:
scores = scores.sigmoid()
else:
scores = scores.softmax(-1)[:, :, :-1]
if with_score_factors:
score_factors = score_factors.permute(0, 2, 3,
1).reshape(batch_size,
-1).sigmoid()
score_factors = score_factors.unsqueeze(2)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
if not is_dynamic_flag:
priors = priors.data
mlvl_valid_bboxes.append(bbox_pred)
mlvl_valid_scores.append(scores)
mlvl_valid_priors.append(priors)
if with_score_factors:
mlvl_score_factors.append(score_factors)
batch_mlvl_bboxes_pred = torch.cat(mlvl_valid_bboxes, dim=1)
batch_scores = torch.cat(mlvl_valid_scores, dim=1)
batch_priors = torch.cat(mlvl_valid_priors, dim=1)
batch_bboxes = self.bbox_coder.decode(
batch_priors, batch_mlvl_bboxes_pred, max_shape=img_shape)
if with_score_factors:
batch_score_factors = torch.cat(mlvl_score_factors, dim=1)
if not self.use_sigmoid_cls:
batch_scores = batch_scores[..., :self.num_classes]
if with_score_factors:
batch_scores = batch_scores * batch_score_factors
if isinstance(self, PAAHead):
batch_scores = batch_scores.sqrt()
return batch_bboxes, batch_scores | Rewrite `predict_by_feat` of `BaseDenseHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness |
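Because the RKNN rewrite above stops before NMS and returns raw `(batch_bboxes, batch_scores)`, suppression has to happen on the host. A rough host-side sketch follows; it is not mmdeploy's actual RKNN pipeline, and the 0.05/0.65 thresholds and the class-agnostic torchvision NMS are assumptions made only for illustration.
import torch
from torchvision.ops import nms
bboxes, scores = batch_bboxes[0], batch_scores[0]   # (num_priors, 4), (num_priors, num_classes)
cls_scores, labels = scores.max(dim=1)               # best class per prior
keep = cls_scores > 0.05                             # assumed score threshold
bboxes, cls_scores, labels = bboxes[keep], cls_scores[keep], labels[keep]
kept = nms(bboxes, cls_scores, iou_threshold=0.65)   # class-agnostic NMS, for brevity
dets = torch.cat([bboxes[kept], cls_scores[kept, None]], dim=1)  # (num_det, 5)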
188,682 | from typing import List, Optional
import torch
from mmdet.models.dense_heads import PAAHead
from mmdet.models.task_modules.coders import (DeltaXYWHBBoxCoder,
DistancePointBBoxCoder,
TBLRBBoxCoder)
from mmdet.structures.bbox import BaseBoxes, get_box_tensor
from mmdet.structures.bbox.transforms import distance2bbox
from mmengine import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
get_post_processing_params,
pad_with_value_if_necessary)
from mmdeploy.codebase.mmdet.ops import ncnn_detection_output_forward
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend, is_dynamic_shape
def _tblr_pred_to_delta_xywh_pred(bbox_pred: torch.Tensor,
normalizer: torch.Tensor) -> torch.Tensor:
"""Transform tblr format bbox prediction to delta_xywh format for ncnn.
An internal function for transforming tblr format bbox prediction to
delta_xywh format. ncnn DetectionOutput layer needs delta_xywh format
bbox_pred as input.
Args:
bbox_pred (Tensor): The bbox prediction of tblr format, has shape
(N, num_det, 4).
normalizer (Tensor): The normalizer scale of bbox horizon and
vertical coordinates, has shape (2,).
Returns:
Tensor: The delta_xywh format bbox predictions.
"""
top = bbox_pred[:, :, 0:1]
bottom = bbox_pred[:, :, 1:2]
left = bbox_pred[:, :, 2:3]
right = bbox_pred[:, :, 3:4]
h = (top + bottom) * normalizer[0]
w = (left + right) * normalizer[1]
_dwh = torch.cat([w, h], dim=2)
assert torch.all(_dwh >= 0), 'wh must be positive before log.'
dwh = torch.log(_dwh)
return torch.cat([(right - left) / 2, (bottom - top) / 2, dwh], dim=2)
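A quick hand-checkable example of the helper above: with `top = bottom = 1`, `left = right = 2` and a normalizer of `(4, 4)`, the center offsets are zero and the width/height terms are `log(16)` and `log(8)`.
import torch
bbox_pred = torch.tensor([[[1., 1., 2., 2.]]])   # (top, bottom, left, right)
normalizer = torch.tensor([4., 4.])
out = _tblr_pred_to_delta_xywh_pred(bbox_pred, normalizer)
# out -> [[[0., 0., log(16), log(8)]]], i.e. (dx, dy, dw, dh) in delta_xywh order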
The provided code snippet includes necessary dependencies for implementing the `base_dense_head__predict_by_feat__ncnn` function. Write a Python function `def base_dense_head__predict_by_feat__ncnn( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True, **kwargs)` to solve the following problem:
Rewrite `predict_by_feat` of BaseDenseHead for ncnn backend. Shape node and batch inference is not supported by ncnn. This function transform dynamic shape to constant shape and remove batch inference. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: output__ncnn (Tensor): outputs, shape is [N, num_det, 6].
Here is the function:
def base_dense_head__predict_by_feat__ncnn(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True,
**kwargs):
"""Rewrite `predict_by_feat` of BaseDenseHead for ncnn backend.
    Shape node and batch inference are not supported by ncnn. This function
    transforms dynamic shapes to constant shapes and removes batch inference.
Args:
ctx (ContextCaller): The context with additional information.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
score_factors (list[Tensor], optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Defaults to None.
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
output__ncnn (Tensor): outputs, shape is [N, num_det, 6].
"""
ctx = FUNCTION_REWRITER.get_context()
assert len(cls_scores) == len(bbox_preds)
deploy_cfg = ctx.cfg
assert not is_dynamic_shape(deploy_cfg), 'base_dense_head for ncnn\
only supports static shape.'
if score_factors is None:
# e.g. Retina, FreeAnchor, Foveabox, etc.
with_score_factors = False
else:
# e.g. FCOS, PAA, ATSS, AutoAssign, etc.
with_score_factors = True
assert len(cls_scores) == len(score_factors)
batch_size = cls_scores[0].shape[0]
assert batch_size == 1, f'ncnn deployment requires batch size 1, \
got {batch_size}.'
num_levels = len(cls_scores)
if with_score_factors:
score_factor_list = score_factors
else:
score_factor_list = [None for _ in range(num_levels)]
if isinstance(self.bbox_coder, DeltaXYWHBBoxCoder):
vars = torch.tensor(self.bbox_coder.stds)
elif isinstance(self.bbox_coder, TBLRBBoxCoder):
normalizer = self.bbox_coder.normalizer
if isinstance(normalizer, float):
vars = torch.tensor([normalizer, normalizer, 1, 1],
dtype=torch.float32)
else:
assert len(normalizer) == 4, f'normalizer of tblr must be 4,\
got {len(normalizer)}'
assert (normalizer[0] == normalizer[1] and normalizer[2]
== normalizer[3]), 'normalizer between top \
and bottom, left and right must be the same value, or \
we can not transform it to delta_xywh format.'
vars = torch.tensor([normalizer[0], normalizer[2], 1, 1],
dtype=torch.float32)
elif isinstance(self.bbox_coder, DistancePointBBoxCoder):
vars = torch.tensor([0, 0, 0, 0], dtype=torch.float32)
else:
vars = torch.tensor([1, 1, 1, 1], dtype=torch.float32)
if isinstance(batch_img_metas[0]['img_shape'][0], int):
assert isinstance(batch_img_metas[0]['img_shape'][1], int)
img_height = batch_img_metas[0]['img_shape'][0]
img_width = batch_img_metas[0]['img_shape'][1]
else:
img_height = batch_img_metas[0]['img_shape'][0].item()
img_width = batch_img_metas[0]['img_shape'][1].item()
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, device=cls_scores[0].device)
batch_mlvl_priors = []
for i in range(num_levels):
_priors = mlvl_priors[i].reshape(1, -1, mlvl_priors[i].shape[-1])
x1 = _priors[:, :, 0:1] / img_width
y1 = _priors[:, :, 1:2] / img_height
x2 = _priors[:, :, 2:3] / img_width
y2 = _priors[:, :, 3:4] / img_height
priors = torch.cat([x1, y1, x2, y2], dim=2).data
batch_mlvl_priors.append(priors)
cfg = self.test_cfg if cfg is None else cfg
batch_mlvl_bboxes = []
batch_mlvl_scores = []
batch_mlvl_score_factors = []
for level_idx, (cls_score, bbox_pred, score_factor, priors) in \
enumerate(zip(cls_scores, bbox_preds,
score_factor_list, batch_mlvl_priors)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
# ncnn needs 3 dimensions to reshape when including -1 parameter in
# width or height dimension.
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
if with_score_factors:
score_factor = score_factor.permute(0, 2, 3, 1).\
reshape(batch_size, -1, 1).sigmoid()
cls_score = cls_score.permute(0, 2, 3, 1).\
reshape(batch_size, -1, self.cls_out_channels)
        # ncnn DetectionOutput op needs num_class + 1 classes. So for sigmoid
        # scores, we pad an extra background class according to the mmdetection
        # num_class definition.
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
dummy_background_score = torch.zeros(
batch_size, cls_score.shape[1], 1, device=cls_score.device)
scores = torch.cat([scores, dummy_background_score], dim=2)
else:
scores = cls_score.softmax(-1)
batch_mlvl_bboxes.append(bbox_pred)
batch_mlvl_scores.append(scores)
batch_mlvl_score_factors.append(score_factor)
batch_mlvl_priors = torch.cat(batch_mlvl_priors, dim=1)
batch_mlvl_scores = torch.cat(batch_mlvl_scores, dim=1)
batch_mlvl_bboxes = torch.cat(batch_mlvl_bboxes, dim=1)
batch_mlvl_scores = torch.cat([
batch_mlvl_scores[:, :, self.num_classes:],
batch_mlvl_scores[:, :, 0:self.num_classes]
],
dim=2)
if isinstance(self.bbox_coder, TBLRBBoxCoder):
batch_mlvl_bboxes = _tblr_pred_to_delta_xywh_pred(
batch_mlvl_bboxes, vars[0:2])
elif isinstance(self.bbox_coder, DistancePointBBoxCoder):
bboxes_x0 = batch_mlvl_bboxes[:, :, 0:1] / img_width
bboxes_y0 = batch_mlvl_bboxes[:, :, 1:2] / img_height
bboxes_x1 = batch_mlvl_bboxes[:, :, 2:3] / img_width
bboxes_y1 = batch_mlvl_bboxes[:, :, 3:4] / img_height
batch_mlvl_bboxes = torch.cat(
[bboxes_x0, bboxes_y0, bboxes_x1, bboxes_y1], dim=2)
batch_mlvl_priors = distance2bbox(batch_mlvl_priors, batch_mlvl_bboxes)
if with_score_factors:
batch_mlvl_score_factors = torch.cat(batch_mlvl_score_factors, dim=1)
batch_mlvl_scores = batch_mlvl_scores.permute(
0, 2, 1).unsqueeze(3) * batch_mlvl_score_factors.permute(
0, 2, 1).unsqueeze(3)
batch_mlvl_scores = batch_mlvl_scores.squeeze(3).permute(0, 2, 1)
if isinstance(self, PAAHead):
batch_mlvl_scores = batch_mlvl_scores.sqrt()
# flatten for ncnn DetectionOutput op inputs.
batch_mlvl_vars = vars.expand_as(batch_mlvl_priors)
batch_mlvl_bboxes = batch_mlvl_bboxes.reshape(batch_size, 1, -1)
batch_mlvl_scores = batch_mlvl_scores.reshape(batch_size, 1, -1)
batch_mlvl_priors = batch_mlvl_priors.reshape(batch_size, 1, -1)
batch_mlvl_vars = batch_mlvl_vars.reshape(batch_size, 1, -1)
batch_mlvl_priors = torch.cat([batch_mlvl_priors, batch_mlvl_vars], dim=1)
if not isinstance(self.bbox_coder, DistancePointBBoxCoder):
batch_mlvl_priors = batch_mlvl_priors.data
post_params = get_post_processing_params(ctx.cfg)
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
output__ncnn = ncnn_detection_output_forward(
batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_priors,
score_threshold, iou_threshold, pre_top_k, keep_top_k,
self.num_classes + 1,
vars.cpu().detach().numpy())
return output__ncnn | Rewrite `predict_by_feat` of BaseDenseHead for ncnn backend. Shape node and batch inference is not supported by ncnn. This function transform dynamic shape to constant shape and remove batch inference. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: output__ncnn (Tensor): outputs, shape is [N, num_det, 6]. |
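For reference, the tensor returned by `ncnn_detection_output_forward` follows ncnn's DetectionOutput convention; as far as I can tell each of the `num_det` rows packs `(label, score, x1, y1, x2, y2)` with coordinates normalized to the network input. A host-side decode would then look roughly like the sketch below; treat the row layout and any background-label offset as assumptions to verify against your ncnn build.
# output__ncnn: (1, num_det, 6); assumed row layout (label, score, x1, y1, x2, y2)
dets = output__ncnn[0]
labels = dets[:, 0]      # may need a background-class offset, see the score reordering above
scores = dets[:, 1]
boxes = dets[:, 2:6] * dets.new_tensor([img_width, img_height, img_width, img_height])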
188,683 | from typing import List, Optional
import torch
import torch.nn.functional as F
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
get_post_processing_params,
pad_with_value)
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import Backend, get_backend, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `gfl_head__predict_by_feat` function. Write a Python function `def gfl_head__predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceData` to solve the following problem:
Rewrite `predict_by_feat` of `GFLHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. self (FoveaHead): The instance of the class FoveaHead. cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). score_factors (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Default None. batch_img_metas (list[dict]): Meta information of the image, e.g., image size, scaling factor, etc. cfg (Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. rescale (bool): If True, return boxes in original image space. Default: False. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness
Here is the function:
def gfl_head__predict_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True) -> InstanceData:
"""Rewrite `predict_by_feat` of `GFLHead` for default backend.
    Rewrite this function to deploy the model, transforming the network output
    for a batch into bbox predictions.
Args:
ctx (ContextCaller): The context with additional information.
        self (GFLHead): The instance of the class GFLHead.
cls_scores (list[Tensor]): Box scores for each scale level
with shape (N, num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
score_factors (list[Tensor], Optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Default None.
batch_img_metas (list[dict]): Meta information of the image, e.g.,
image size, scaling factor, etc.
cfg (Config | None): Test / postprocessing configuration,
if None, test_cfg would be used. Default: None.
rescale (bool): If True, return boxes in original image space.
Default: False.
Returns:
If with_nms == True:
            tuple[Tensor, Tensor]: (dets, labels),
`dets` of shape [N, num_det, 5] and `labels` of shape
[N, num_det].
Else:
tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes,
batch_mlvl_scores, batch_mlvl_centerness
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
backend = get_backend(deploy_cfg)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device)
mlvl_priors = [priors.unsqueeze(0) for priors in mlvl_priors]
mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
if score_factors is None:
with_score_factors = False
mlvl_score_factor = [None for _ in range(num_levels)]
else:
with_score_factors = True
mlvl_score_factor = [
score_factors[i].detach() for i in range(num_levels)
]
mlvl_score_factors = []
assert batch_img_metas is not None
img_shape = batch_img_metas[0]['img_shape']
assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors)
batch_size = cls_scores[0].shape[0]
cfg = self.test_cfg
pre_topk = cfg.get('nms_pre', -1)
mlvl_valid_bboxes = []
mlvl_valid_scores = []
mlvl_valid_priors = []
for cls_score, bbox_pred, score_factors, priors, stride in zip(
mlvl_cls_scores, mlvl_bbox_preds, mlvl_score_factor, mlvl_priors,
self.prior_generator.strides):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
assert stride[0] == stride[1]
scores = cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
if self.use_sigmoid_cls:
scores = scores.sigmoid()
nms_pre_score = scores
else:
scores = scores.softmax(-1)
nms_pre_score = scores
if with_score_factors:
score_factors = score_factors.permute(0, 2, 3,
1).reshape(batch_size,
-1).sigmoid()
score_factors = score_factors.unsqueeze(2)
def _batched_integral(intergral, x):
batch_size = x.size(0)
x = F.softmax(
x.reshape(batch_size, -1, intergral.reg_max + 1), dim=2)
x = F.linear(x,
intergral.project.type_as(x).unsqueeze(0)).reshape(
batch_size, -1, 4)
return x
bbox_pred = _batched_integral(
self.integral, bbox_pred.permute(0, 2, 3, 1)) * stride[0]
if not is_dynamic_flag:
priors = priors.data
if pre_topk > 0:
if with_score_factors:
nms_pre_score = nms_pre_score * score_factors
if backend == Backend.TENSORRT:
priors = pad_with_value(priors, 1, pre_topk)
bbox_pred = pad_with_value(bbox_pred, 1, pre_topk)
scores = pad_with_value(scores, 1, pre_topk, 0.)
nms_pre_score = pad_with_value(nms_pre_score, 1, pre_topk, 0.)
if with_score_factors:
score_factors = pad_with_value(score_factors, 1, pre_topk,
0.)
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores, _ = nms_pre_score.max(-1)
else:
max_scores, _ = nms_pre_score[..., :-1].max(-1)
_, topk_inds = max_scores.topk(pre_topk)
bbox_pred, scores, score_factors = gather_topk(
bbox_pred,
scores,
score_factors,
inds=topk_inds,
batch_size=batch_size,
is_batched=True)
priors = gather_topk(
priors,
inds=topk_inds,
batch_size=batch_size,
is_batched=False)
mlvl_valid_bboxes.append(bbox_pred)
mlvl_valid_scores.append(scores)
priors = self.anchor_center(priors)
mlvl_valid_priors.append(priors)
if with_score_factors:
mlvl_score_factors.append(score_factors)
batch_mlvl_bboxes_pred = torch.cat(mlvl_valid_bboxes, dim=1)
batch_scores = torch.cat(mlvl_valid_scores, dim=1)
batch_priors = torch.cat(mlvl_valid_priors, dim=1)
batch_bboxes = self.bbox_coder.decode(
batch_priors, batch_mlvl_bboxes_pred, max_shape=img_shape)
if with_score_factors:
batch_score_factors = torch.cat(mlvl_score_factors, dim=1)
if not self.use_sigmoid_cls:
batch_scores = batch_scores[..., :self.num_classes]
if with_score_factors:
batch_scores = batch_scores * batch_score_factors
if not with_nms:
return batch_bboxes, batch_scores
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
nms_type = cfg.nms.get('type')
return multiclass_nms(
batch_bboxes,
batch_scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k) | Rewrite `predict_by_feat` of `GFLHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. self (FoveaHead): The instance of the class FoveaHead. cls_scores (list[Tensor]): Box scores for each scale level with shape (N, num_anchors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W). score_factors (list[Tensor], Optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Default None. batch_img_metas (list[dict]): Meta information of the image, e.g., image size, scaling factor, etc. cfg (Config | None): Test / postprocessing configuration, if None, test_cfg would be used. Default: None. rescale (bool): If True, return boxes in original image space. Default: False. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness |
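The `_batched_integral` helper above is the GFL "integral" step: each side of the box is predicted as a distribution over `reg_max + 1` bins and decoded as its softmax expectation. A tiny standalone illustration, assuming the default `reg_max = 7`:
import torch
import torch.nn.functional as F
reg_max = 7
project = torch.linspace(0, reg_max, reg_max + 1)      # [0, 1, ..., 7]
logits = torch.zeros(1, 4 * (reg_max + 1))             # one prior, four sides, uniform bins
probs = F.softmax(logits.reshape(1, 4, reg_max + 1), dim=-1)
distances = (probs * project).sum(-1)                  # expected offset per side -> 3.5 each
The result is then multiplied by the level stride and decoded against the anchor centers, exactly as in the loop above.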
188,684 | import torch.nn.functional as F
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `base_semantic_head__predict` function. Write a Python function `def base_semantic_head__predict(self, x, batch_img_metas, rescale=False)` to solve the following problem:
Rewrite `predict` for default backend. Support configured dynamic/static shape for model input and return semantic-segmentation result as Tensor instead of numpy array. Args: x (Union[Tensor, Tuple[Tensor]]): Feature maps. batch_img_metas (List[dict]): List of image information. rescale (bool): Whether to rescale the results. Defaults to False. Returns: Tensor: `semseg` of shape [N, num_sem_class, H, W]
Here is the function:
def base_semantic_head__predict(self, x, batch_img_metas, rescale=False):
"""Rewrite `predict` for default backend. Support configured dynamic/static
shape for model input and return semantic-segmentation result as Tensor
instead of numpy array.
Args:
x (Union[Tensor, Tuple[Tensor]]): Feature maps.
batch_img_metas (List[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
Tensor: `semseg` of shape [N, num_sem_class, H, W]
"""
seg_preds = self.forward(x)['seg_preds']
img_shape = batch_img_metas[0]['batch_input_shape']
seg_preds = F.interpolate(
seg_preds,
size=(img_shape[0], img_shape[1]),
mode='bilinear',
align_corners=False)
return seg_preds | Rewrite `predict` for default backend. Support configured dynamic/static shape for model input and return semantic-segmentation result as Tensor instead of numpy array. Args: x (Union[Tensor, Tuple[Tensor]]): Feature maps. batch_img_metas (List[dict]): List of image information. rescale (bool): Whether to rescale the results. Defaults to False. Returns: Tensor: `semseg` of shape [N, num_sem_class, H, W] |
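Shape-wise the rewrite boils down to a single bilinear upsample of the semantic logits to the padded input size. A minimal sketch with made-up sizes:
import torch
import torch.nn.functional as F
seg_preds = torch.rand(1, 54, 25, 34)        # (N, num_sem_class, H/8, W/8); sizes are illustrative
batch_input_shape = (200, 272)
semseg = F.interpolate(seg_preds, size=batch_input_shape, mode='bilinear', align_corners=False)
assert semseg.shape == (1, 54, 200, 272)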
188,685 | from typing import Tuple
import torch
from torch.onnx import symbolic_helper
from mmdeploy.core import FUNCTION_REWRITER
class GridPriorsTRTOp(torch.autograd.Function):
    @staticmethod
    def forward(ctx, base_anchors, feat_h, feat_w, stride_h: int,
stride_w: int):
"""Generate grid priors by base anchors."""
# torch>=1.13 has runtime error
# when using torch.arange in autograd function
output = getattr(GridPriorsTRTOp, 'output', None)
if output is not None:
return output
device = base_anchors.device
dtype = base_anchors.dtype
shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w
shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h
def _meshgrid(x, y, row_major=True):
# use shape instead of len to keep tracing while exporting to onnx
xx = x.repeat(y.shape[0])
yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
if row_major:
return xx, yy
else:
return yy, xx
shift_xx, shift_yy = _meshgrid(shift_x, shift_y)
shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.view(-1, 4)
        # the first A rows correspond to the A base anchors at cell (0, 0),
        # then (0, 1), (0, 2), ... in row-major order over the feature map
return all_anchors
    @staticmethod
    def symbolic(g, base_anchors, feat_h, feat_w, stride_h: int,
stride_w: int):
"""Map ops to onnx symbolics."""
        # zero_h and zero_w are used to provide shape to GridPriorsTRT
feat_h = symbolic_helper._unsqueeze_helper(g, feat_h, [0])
feat_w = symbolic_helper._unsqueeze_helper(g, feat_w, [0])
zero_h = g.op(
'ConstantOfShape',
feat_h,
value_t=torch.tensor([0], dtype=torch.long),
)
zero_w = g.op(
'ConstantOfShape',
feat_w,
value_t=torch.tensor([0], dtype=torch.long),
)
return g.op(
'mmdeploy::GridPriorsTRT',
base_anchors,
zero_h,
zero_w,
stride_h_i=stride_h,
stride_w_i=stride_w)
grid_priors_trt = GridPriorsTRTOp.apply
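To make the anchor layout produced by `forward` concrete, here is a standalone re-derivation of the shift logic for a 2x3 feature map with stride 16 and a single 16x16 base anchor; the numbers are illustrative and the custom op itself is not called.
import torch
base_anchors = torch.tensor([[-8., -8., 8., 8.]])
shift_x = torch.arange(0, 3).float() * 16        # feat_w = 3
shift_y = torch.arange(0, 2).float() * 16        # feat_h = 2
xx = shift_x.repeat(2)                           # x varies fastest (row-major)
yy = shift_y.view(-1, 1).repeat(1, 3).view(-1)
shifts = torch.stack([xx, yy, xx, yy], dim=-1)
all_anchors = (base_anchors[None, :, :] + shifts[:, None, :]).view(-1, 4)
# (6, 4): row 0 is the base anchor at (0, 0), row 1 is shifted 16 px in x, and so on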
The provided code snippet includes necessary dependencies for implementing the `anchorgenerator__single_level_grid_priors__trt` function. Write a Python function `def anchorgenerator__single_level_grid_priors__trt( self, featmap_size: Tuple[int], level_idx: int, dtype: torch.dtype = torch.float32, device: str = 'cuda') -> torch.Tensor` to solve the following problem:
This is a rewrite to replace ONNX anchor generator to TensorRT custom op. Args: ctx : The rewriter context featmap_size (tuple[int]): Size of the feature maps. level_idx (int): The index of corresponding feature map level. dtype (obj:`torch.dtype`): Date type of points.Defaults to ``torch.float32``. device (str, optional): The device the tensor will be put on. Defaults to 'cuda'. Returns: torch.Tensor: Anchors in the overall feature maps.
Here is the function:
def anchorgenerator__single_level_grid_priors__trt(
self,
featmap_size: Tuple[int],
level_idx: int,
dtype: torch.dtype = torch.float32,
device: str = 'cuda') -> torch.Tensor:
"""This is a rewrite to replace ONNX anchor generator to TensorRT custom
op.
Args:
ctx : The rewriter context
featmap_size (tuple[int]): Size of the feature maps.
level_idx (int): The index of corresponding feature map level.
        dtype (:obj:`torch.dtype`): Data type of points. Defaults to
``torch.float32``.
device (str, optional): The device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature maps.
"""
ctx = FUNCTION_REWRITER.get_context()
from mmdet.models.task_modules.prior_generators import AnchorGenerator
if type(self) != AnchorGenerator:
# only use custom node on default generator.
return ctx.origin_func(
self,
featmap_size=featmap_size,
level_idx=level_idx,
dtype=dtype,
device=device)
feat_h, feat_w = featmap_size
output = ctx.origin_func(self, featmap_size, level_idx, dtype, device).data
if isinstance(feat_h, int) and isinstance(feat_w, int):
return output
base_anchors = self.base_anchors[level_idx].to(device).to(dtype)
stride_w, stride_h = self.strides[level_idx]
GridPriorsTRTOp.output = output
return grid_priors_trt(base_anchors, feat_h, feat_w, stride_h, stride_w) | This is a rewrite to replace ONNX anchor generator to TensorRT custom op. Args: ctx : The rewriter context featmap_size (tuple[int]): Size of the feature maps. level_idx (int): The index of corresponding feature map level. dtype (obj:`torch.dtype`): Date type of points.Defaults to ``torch.float32``. device (str, optional): The device the tensor will be put on. Defaults to 'cuda'. Returns: torch.Tensor: Anchors in the overall feature maps. |
188,686 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils.constants import Backend
func_name='mmdet.models.task_modules.prior_generators.MlvlPointGenerator'
'.single_level_grid_priors',
backend=Backend.TENSORRT.value)
def mlvl_point_generator__single_level_grid_priors__tensorrt(
self,
featmap_size,
level_idx,
dtype=torch.float32,
device='cuda',
with_stride=False):
"""Rewrite `single_level_grid_priors` of `MlvlPointGenerator` as
onnx2tensorrt raise the error of shape inference for YOLOX with some
versions of TensorRT.
Args:
featmap_size (tuple[int]): Size of the feature maps, arrange as
(h, w).
level_idx (int): The index of corresponding feature map level.
dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
device (str, optional): The device the tensor will be put on.
Defaults to 'cuda'.
with_stride (bool): Concatenate the stride to the last dimension
of points.
Return:
Tensor: Points of single feature levels.
The shape of tensor should be (N, 2) when with stride is
``False``, where N = width * height, width and height
are the sizes of the corresponding feature level,
and the last dimension 2 represent (coord_x, coord_y),
otherwise the shape should be (N, 4),
and the last dimension 4 represent
(coord_x, coord_y, stride_w, stride_h).
"""
feat_h, feat_w = featmap_size
stride_w, stride_h = self.strides[level_idx]
shift_x = (torch.arange(0, feat_w, device=device) + self.offset) * stride_w
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
shift_x = shift_x.to(dtype)
shift_y = (torch.arange(0, feat_h, device=device) + self.offset) * stride_h
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
shift_y = shift_y.to(dtype)
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
if not with_stride:
shifts = torch.stack([shift_xx, shift_yy], dim=-1)
else:
# use `feat_w * feat_h` instead of `shift_xx.shape[0]` for TensorRT
stride_w = shift_xx.new_full((feat_w * feat_h, ), stride_w).to(dtype)
stride_h = shift_xx.new_full((feat_w * feat_h, ), stride_h).to(dtype)
shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1)
all_points = shifts.to(device)
return all_points
The provided code snippet includes necessary dependencies for implementing the `mlvl_point_generator__single_level_grid_priors__tensorrt` function. Write a Python function `def mlvl_point_generator__single_level_grid_priors__tensorrt( self, featmap_size, level_idx, dtype=torch.float32, device='cuda', with_stride=False)` to solve the following problem:
Rewrite `single_level_grid_priors` of `MlvlPointGenerator` as onnx2tensorrt raise the error of shape inference for YOLOX with some versions of TensorRT. Args: featmap_size (tuple[int]): Size of the feature maps, arrange as (h, w). level_idx (int): The index of corresponding feature map level. dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. device (str, optional): The device the tensor will be put on. Defaults to 'cuda'. with_stride (bool): Concatenate the stride to the last dimension of points. Return: Tensor: Points of single feature levels. The shape of tensor should be (N, 2) when with stride is ``False``, where N = width * height, width and height are the sizes of the corresponding feature level, and the last dimension 2 represent (coord_x, coord_y), otherwise the shape should be (N, 4), and the last dimension 4 represent (coord_x, coord_y, stride_w, stride_h).
Here is the function:
def mlvl_point_generator__single_level_grid_priors__tensorrt(
self,
featmap_size,
level_idx,
dtype=torch.float32,
device='cuda',
with_stride=False):
"""Rewrite `single_level_grid_priors` of `MlvlPointGenerator` as
onnx2tensorrt raise the error of shape inference for YOLOX with some
versions of TensorRT.
Args:
featmap_size (tuple[int]): Size of the feature maps, arrange as
(h, w).
level_idx (int): The index of corresponding feature map level.
dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
device (str, optional): The device the tensor will be put on.
Defaults to 'cuda'.
with_stride (bool): Concatenate the stride to the last dimension
of points.
Return:
Tensor: Points of single feature levels.
The shape of tensor should be (N, 2) when with stride is
``False``, where N = width * height, width and height
are the sizes of the corresponding feature level,
and the last dimension 2 represent (coord_x, coord_y),
otherwise the shape should be (N, 4),
and the last dimension 4 represent
(coord_x, coord_y, stride_w, stride_h).
"""
feat_h, feat_w = featmap_size
stride_w, stride_h = self.strides[level_idx]
shift_x = (torch.arange(0, feat_w, device=device) + self.offset) * stride_w
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
shift_x = shift_x.to(dtype)
shift_y = (torch.arange(0, feat_h, device=device) + self.offset) * stride_h
# keep featmap_size as Tensor instead of int, so that we
# can convert to ONNX correctly
shift_y = shift_y.to(dtype)
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
if not with_stride:
shifts = torch.stack([shift_xx, shift_yy], dim=-1)
else:
# use `feat_w * feat_h` instead of `shift_xx.shape[0]` for TensorRT
stride_w = shift_xx.new_full((feat_w * feat_h, ), stride_w).to(dtype)
stride_h = shift_xx.new_full((feat_w * feat_h, ), stride_h).to(dtype)
shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1)
all_points = shifts.to(device)
return all_points | Rewrite `single_level_grid_priors` of `MlvlPointGenerator` as onnx2tensorrt raise the error of shape inference for YOLOX with some versions of TensorRT. Args: featmap_size (tuple[int]): Size of the feature maps, arrange as (h, w). level_idx (int): The index of corresponding feature map level. dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. device (str, optional): The device the tensor will be put on. Defaults to 'cuda'. with_stride (bool): Concatenate the stride to the last dimension of points. Return: Tensor: Points of single feature levels. The shape of tensor should be (N, 2) when with stride is ``False``, where N = width * height, width and height are the sizes of the corresponding feature level, and the last dimension 2 represent (coord_x, coord_y), otherwise the shape should be (N, 4), and the last dimension 4 represent (coord_x, coord_y, stride_w, stride_h). |
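For a concrete picture of what this TensorRT-friendly rewrite returns, the sketch below recomputes the priors for a 2x2 level with stride 8, offset 0.5 and `with_stride=True` (the YOLOX setting); the values are easy to check by hand.
import torch
offset, stride, feat = 0.5, 8, 2
shift = (torch.arange(0, feat) + offset) * stride    # [4., 12.]
xx = shift.repeat(feat)                              # [4., 12., 4., 12.]
yy = shift.view(-1, 1).repeat(1, feat).view(-1)      # [4., 4., 12., 12.]
strides = torch.full_like(xx, float(stride))
priors = torch.stack([xx, yy, strides, strides], dim=-1)
# rows: (4, 4, 8, 8), (12, 4, 8, 8), (4, 12, 8, 8), (12, 12, 8, 8)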
188,687 | import numpy as np
import torch
from mmdeploy.core import FUNCTION_REWRITER
def delta2bbox(rois,
deltas,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.),
max_shape=None,
wh_ratio_clip=16 / 1000,
clip_border=True,
add_ctr_clamp=False,
ctr_clamp=32):
"""Rewrite `delta2bbox` for default backend.
Since the need of clip op with dynamic min and max, this function uses
clip_bboxes function to support dynamic shape.
Args:
ctx (ContextCaller): The context with additional information.
rois (Tensor): Boxes to be transformed. Has shape (N, 4).
deltas (Tensor): Encoded offsets relative to each roi.
Has shape (N, num_classes * 4) or (N, 4). Note
N = num_base_anchors * W * H, when rois is a grid of
anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates.
Default (0., 0., 0., 0.).
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates. Default (1., 1., 1., 1.).
max_shape (tuple[int, int]): Maximum bounds for boxes, specifies
(H, W). Default None.
wh_ratio_clip (float): Maximum aspect ratio for boxes. Default
16 / 1000.
        clip_border (bool, optional): Whether to clip the objects outside the
border of the image. Default True.
add_ctr_clamp (bool): Whether to add center clamp. When set to True,
the center of the prediction bounding box will be clamped to
avoid being too far away from the center of the anchor.
Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
Return:
bboxes (Tensor): Boxes with shape (N, num_classes * 4) or (N, 4),
where 4 represent tl_x, tl_y, br_x, br_y.
"""
means = deltas.new_tensor(means).view(1, -1)
stds = deltas.new_tensor(stds).view(1, -1)
delta_shape = deltas.shape
reshaped_deltas = deltas.view(delta_shape[:-1] + (-1, 4))
denorm_deltas = reshaped_deltas * stds + means
dxy = denorm_deltas[..., :2]
dwh = denorm_deltas[..., 2:]
# fix openvino on torch1.13
xy1 = rois[..., :2].unsqueeze(2)
xy2 = rois[..., 2:].unsqueeze(2)
pxy = (xy1 + xy2) * 0.5
pwh = xy2 - xy1
dxy_wh = pwh * dxy
max_ratio = np.abs(np.log(wh_ratio_clip))
if add_ctr_clamp:
dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp)
dwh = torch.clamp(dwh, max=max_ratio)
else:
dwh = dwh.clamp(min=-max_ratio, max=max_ratio)
# Use exp(network energy) to enlarge/shrink each roi
half_gwh = pwh * dwh.exp() * 0.5
# Use network energy to shift the center of each roi
gxy = pxy + dxy_wh
# Convert center-xy/width/height to top-left, bottom-right
xy1 = gxy - half_gwh
xy2 = gxy + half_gwh
x1 = xy1[..., 0]
y1 = xy1[..., 1]
x2 = xy2[..., 0]
y2 = xy2[..., 1]
if clip_border and max_shape is not None:
from mmdeploy.codebase.mmdet.deploy import clip_bboxes
x1, y1, x2, y2 = clip_bboxes(x1, y1, x2, y2, max_shape)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
return bboxes
func_name='mmdet.models.task_modules.coders.'
'delta_xywh_bbox_coder.delta2bbox',
backend='ncnn')
'DeltaXYWHBBoxCoder.decode',
def delta2bbox(rois,
deltas,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.),
max_shape=None,
wh_ratio_clip=16 / 1000,
clip_border=True,
add_ctr_clamp=False,
ctr_clamp=32):
"""Rewrite `delta2bbox` for default backend.
Since the need of clip op with dynamic min and max, this function uses
clip_bboxes function to support dynamic shape.
Args:
ctx (ContextCaller): The context with additional information.
rois (Tensor): Boxes to be transformed. Has shape (N, 4).
deltas (Tensor): Encoded offsets relative to each roi.
Has shape (N, num_classes * 4) or (N, 4). Note
N = num_base_anchors * W * H, when rois is a grid of
anchors. Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates.
Default (0., 0., 0., 0.).
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates. Default (1., 1., 1., 1.).
max_shape (tuple[int, int]): Maximum bounds for boxes, specifies
(H, W). Default None.
wh_ratio_clip (float): Maximum aspect ratio for boxes. Default
16 / 1000.
        clip_border (bool, optional): Whether to clip the objects outside the
border of the image. Default True.
add_ctr_clamp (bool): Whether to add center clamp. When set to True,
the center of the prediction bounding box will be clamped to
avoid being too far away from the center of the anchor.
Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
Return:
bboxes (Tensor): Boxes with shape (N, num_classes * 4) or (N, 4),
where 4 represent tl_x, tl_y, br_x, br_y.
"""
means = deltas.new_tensor(means).view(1, -1)
stds = deltas.new_tensor(stds).view(1, -1)
delta_shape = deltas.shape
reshaped_deltas = deltas.view(delta_shape[:-1] + (-1, 4))
denorm_deltas = reshaped_deltas * stds + means
dxy = denorm_deltas[..., :2]
dwh = denorm_deltas[..., 2:]
# fix openvino on torch1.13
xy1 = rois[..., :2].unsqueeze(2)
xy2 = rois[..., 2:].unsqueeze(2)
pxy = (xy1 + xy2) * 0.5
pwh = xy2 - xy1
dxy_wh = pwh * dxy
max_ratio = np.abs(np.log(wh_ratio_clip))
if add_ctr_clamp:
dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp)
dwh = torch.clamp(dwh, max=max_ratio)
else:
dwh = dwh.clamp(min=-max_ratio, max=max_ratio)
# Use exp(network energy) to enlarge/shrink each roi
half_gwh = pwh * dwh.exp() * 0.5
# Use network energy to shift the center of each roi
gxy = pxy + dxy_wh
# Convert center-xy/width/height to top-left, bottom-right
xy1 = gxy - half_gwh
xy2 = gxy + half_gwh
x1 = xy1[..., 0]
y1 = xy1[..., 1]
x2 = xy2[..., 0]
y2 = xy2[..., 1]
if clip_border and max_shape is not None:
from mmdeploy.codebase.mmdet.deploy import clip_bboxes
x1, y1, x2, y2 = clip_bboxes(x1, y1, x2, y2, max_shape)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
return bboxes
func_name='mmdet.models.task_modules.coders.'
'delta_xywh_bbox_coder.delta2bbox',
backend='ncnn')
The provided code snippet includes necessary dependencies for implementing the `deltaxywhbboxcoder__decode` function. Write a Python function `def deltaxywhbboxcoder__decode(self, bboxes, pred_bboxes, max_shape=None, wh_ratio_clip=16 / 1000)` to solve the following problem:
Rewrite `decode` of `DeltaXYWHBBoxCoder` for default backend. Rewrite this func to call `delta2bbox` directly. Args: bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4) pred_bboxes (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors.Offset encoding follows [1]_. max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. wh_ratio_clip (float, optional): The allowed ratio between width and height. Returns: torch.Tensor: Decoded boxes.
Here is the function:
def deltaxywhbboxcoder__decode(self,
bboxes,
pred_bboxes,
max_shape=None,
wh_ratio_clip=16 / 1000):
"""Rewrite `decode` of `DeltaXYWHBBoxCoder` for default backend.
Rewrite this func to call `delta2bbox` directly.
Args:
bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)
pred_bboxes (Tensor): Encoded offsets with respect to each roi.
Has shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
when rois is a grid of anchors.Offset encoding follows [1]_.
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
wh_ratio_clip (float, optional): The allowed ratio between
width and height.
Returns:
torch.Tensor: Decoded boxes.
"""
assert pred_bboxes.size(0) == bboxes.size(0)
if pred_bboxes.ndim == 3:
assert pred_bboxes.size(1) == bboxes.size(1)
from mmdet.models.task_modules.coders.delta_xywh_bbox_coder import \
delta2bbox
decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, self.stds,
max_shape, wh_ratio_clip, self.clip_border,
self.add_ctr_clamp, self.ctr_clamp)
return decoded_bboxes | Rewrite `decode` of `DeltaXYWHBBoxCoder` for default backend. Rewrite this func to call `delta2bbox` directly. Args: bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4) pred_bboxes (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors.Offset encoding follows [1]_. max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. wh_ratio_clip (float, optional): The allowed ratio between width and height. Returns: torch.Tensor: Decoded boxes. |
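A small numeric check of `delta2bbox` as defined above: an all-zero delta returns the prior unchanged, since zero `dxy` keeps the center and `exp(0) = 1` keeps the width and height.
import torch
rois = torch.tensor([[[0., 0., 10., 10.]]])   # (B, N, 4)
deltas = torch.zeros(1, 1, 4)                 # one class, zero offsets
boxes = delta2bbox(rois, deltas)
# boxes -> tensor([[[0., 0., 10., 10.]]])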
188,688 | import numpy as np
import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `delta2bbox__ncnn` function. Write a Python function `def delta2bbox__ncnn(rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000, clip_border=True, add_ctr_clamp=False, ctr_clamp=32)` to solve the following problem:
Rewrite `delta2bbox` for ncnn backend. Batch dimension is not supported by ncnn, but supported by pytorch. ncnn regards the lowest two dimensions as continuous address with byte alignment, so the lowest two dimensions are not absolutely independent. Reshape operator with -1 arguments should operates ncnn::Mat with dimension >= 3. Args: ctx (ContextCaller): The context with additional information. rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors.Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If rois shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. wh_ratio_clip (float): Maximum aspect ratio for boxes. clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. add_ctr_clamp (bool): Whether to add center clamp, when added, the predicted box is clamped is its center is too far away from the original anchor's center. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. Return: bboxes (Tensor): Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y.
Here is the function:
def delta2bbox__ncnn(rois,
deltas,
means=(0., 0., 0., 0.),
stds=(1., 1., 1., 1.),
max_shape=None,
wh_ratio_clip=16 / 1000,
clip_border=True,
add_ctr_clamp=False,
ctr_clamp=32):
"""Rewrite `delta2bbox` for ncnn backend.
Batch dimension is not supported by ncnn, but supported by pytorch.
ncnn regards the lowest two dimensions as continuous address with byte
alignment, so the lowest two dimensions are not absolutely independent.
    The Reshape operator with -1 arguments should operate on ncnn::Mat with
    dimension >= 3.
Args:
ctx (ContextCaller): The context with additional information.
rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)
deltas (Tensor): Encoded offsets with respect to each roi.
Has shape (B, N, num_classes * 4) or (B, N, 4) or
(N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
when rois is a grid of anchors.Offset encoding follows [1]_.
means (Sequence[float]): Denormalizing means for delta coordinates
stds (Sequence[float]): Denormalizing standard deviation for delta
coordinates
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If rois shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
wh_ratio_clip (float): Maximum aspect ratio for boxes.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
add_ctr_clamp (bool): Whether to add center clamp, when added, the
predicted box is clamped if its center is too far away from
the original anchor's center. Only used by YOLOF. Default False.
ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
Default 32.
Return:
bboxes (Tensor): Boxes with shape (B, N, num_classes * 4) or (B, N, 4)
or (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y,
br_x, br_y.
"""
means = deltas.new_tensor(means).view(1, 1, 1, -1).data
stds = deltas.new_tensor(stds).view(1, 1, 1, -1).data
delta_shape = deltas.shape
reshaped_deltas = deltas.view(delta_shape[:-1] + (-1, 4))
denorm_deltas = reshaped_deltas * stds + means
dxy = denorm_deltas[..., :2]
dwh = denorm_deltas[..., 2:]
xy1 = rois[..., None, :2]
xy2 = rois[..., None, 2:]
pxy = (xy1 + xy2) * 0.5
pwh = xy2 - xy1
dxy_wh = pwh * dxy
max_ratio = np.abs(np.log(wh_ratio_clip))
if add_ctr_clamp:
dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp)
dwh = torch.clamp(dwh, max=max_ratio)
else:
dwh = dwh.clamp(min=-max_ratio, max=max_ratio)
# Use exp(network energy) to enlarge/shrink each roi
half_gwh = pwh * dwh.exp() * 0.5
# Use network energy to shift the center of each roi
gxy = pxy + dxy_wh
# Convert center-xy/width/height to top-left, bottom-right
xy1 = gxy - half_gwh
xy2 = gxy + half_gwh
x1 = xy1[..., 0]
y1 = xy1[..., 1]
x2 = xy2[..., 0]
y2 = xy2[..., 1]
if clip_border and max_shape is not None:
from mmdeploy.codebase.mmdet.deploy import clip_bboxes
x1, y1, x2, y2 = clip_bboxes(x1, y1, x2, y2, max_shape)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
return bboxes | Rewrite `delta2bbox` for ncnn backend. Batch dimension is not supported by ncnn, but supported by pytorch. ncnn regards the lowest two dimensions as continuous address with byte alignment, so the lowest two dimensions are not absolutely independent. Reshape operator with -1 arguments should operates ncnn::Mat with dimension >= 3. Args: ctx (ContextCaller): The context with additional information. rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors.Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If rois shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. wh_ratio_clip (float): Maximum aspect ratio for boxes. clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. add_ctr_clamp (bool): Whether to add center clamp, when added, the predicted box is clamped is its center is too far away from the original anchor's center. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. Return: bboxes (Tensor): Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. |
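The decoding above amounts to shifting each prior's center by pwh * dxy and scaling its size by exp(dwh). A minimal sketch on toy tensors (purely illustrative, not mmdeploy code; values are made up):
import torch

rois = torch.tensor([[[0., 0., 10., 10.]]])        # (B=1, N=1, 4)
deltas = torch.tensor([[[0.1, 0.2, 0.0, 0.0]]])    # (B=1, N=1, 4), already denormalized
pxy = (rois[..., :2] + rois[..., 2:]) * 0.5        # prior centers
pwh = rois[..., 2:] - rois[..., :2]                # prior widths/heights
gxy = pxy + pwh * deltas[..., :2]                  # shift centers
gwh = pwh * deltas[..., 2:].exp()                  # scale sizes
boxes = torch.cat([gxy - gwh * 0.5, gxy + gwh * 0.5], dim=-1)
print(boxes)  # tensor([[[ 1.,  2., 11., 12.]]]) for this toy input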
188,689 | import mmdet.structures.bbox.transforms
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `distancepointbboxcoder__decode` function. Write a Python function `def distancepointbboxcoder__decode(self, points, pred_bboxes, max_shape=None)` to solve the following problem:
Rewrite `mmdet.models.task_modules.coders.distance_point_bbox_coder. \ DistancePointBBoxCoder.decode` Decode distance prediction to bounding box. Args: ctx (ContextCaller): The context with additional information. self (DistancePointBBoxCoder): The instance of the class DistancePointBBoxCoder. points (Tensor): Shape (B, N, 2) or (N, 2). pred_bboxes (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]], and the length of max_shape should also be B. Default None. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4)
Here is the function:
def distancepointbboxcoder__decode(self, points, pred_bboxes, max_shape=None):
"""Rewrite `mmdet.models.task_modules.coders.distance_point_bbox_coder. \
DistancePointBBoxCoder.decode`
Decode distance prediction to bounding box.
Args:
ctx (ContextCaller): The context with additional information.
self (DistancePointBBoxCoder): The instance of the class
DistancePointBBoxCoder.
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
# Call distance2bbox through the mmdet.structures.bbox.transforms module
# path so that its registered rewriter is found and applied.
return mmdet.structures.bbox.transforms.distance2bbox(
points, pred_bboxes, max_shape) | Rewrite `mmdet.models.task_modules.coders.distance_point_bbox_coder. \ DistancePointBBoxCoder.decode` Decode distance prediction to bounding box. Args: ctx (ContextCaller): The context with additional information. self (DistancePointBBoxCoder): The instance of the class DistancePointBBoxCoder. points (Tensor): Shape (B, N, 2) or (N, 2). pred_bboxes (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]], and the length of max_shape should also be B. Default None. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4) |
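A toy check (not mmdet code) of the distance-to-bbox conversion that the call above delegates to distance2bbox; the numbers are illustrative only:
import torch

points = torch.tensor([[10., 10.]])
pred = torch.tensor([[2., 3., 4., 5.]])              # left, top, right, bottom
bbox = torch.stack([points[:, 0] - pred[:, 0], points[:, 1] - pred[:, 1],
                    points[:, 0] + pred[:, 2], points[:, 1] + pred[:, 3]], dim=-1)
print(bbox)  # tensor([[ 8.,  7., 14., 15.]])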
188,690 | import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `tblr2bboxes` function. Write a Python function `def tblr2bboxes(priors, tblr, normalizer=4.0, normalize_by_wh=True, max_shape=None, clip_border=True)` to solve the following problem:
Rewrite `tblr2bboxes` for default backend. Since the need of clip op with dynamic min and max, this function uses clip_bboxes function to support dynamic shape. Args: ctx (ContextCaller): The context with additional information. priors (Tensor): Prior boxes in point form (x0, y0, x1, y1) Shape: (N,4) or (B, N, 4). tblr (Tensor): Coords of network output in tblr form Shape: (N, 4) or (B, N, 4). normalizer (Sequence[float] | float): Normalization parameter of encoded boxes. By list, it represents the normalization factors at tblr dims. By float, it is the unified normalization factor at all dims. Default: 4.0 normalize_by_wh (bool): Whether the tblr coordinates have been normalized by the side length (wh) of prior bboxes. max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Return: bboxes (Tensor): Boxes with shape (N, 4) or (B, N, 4)
Here is the function:
def tblr2bboxes(priors,
tblr,
normalizer=4.0,
normalize_by_wh=True,
max_shape=None,
clip_border=True):
"""Rewrite `tblr2bboxes` for default backend.
Since the need of clip op with dynamic min and max, this function uses
clip_bboxes function to support dynamic shape.
Args:
ctx (ContextCaller): The context with additional information.
priors (Tensor): Prior boxes in point form (x0, y0, x1, y1)
Shape: (N,4) or (B, N, 4).
tblr (Tensor): Coords of network output in tblr form
Shape: (N, 4) or (B, N, 4).
normalizer (Sequence[float] | float): Normalization parameter of
encoded boxes. By list, it represents the normalization factors at
tblr dims. By float, it is the unified normalization factor at all
dims. Default: 4.0
normalize_by_wh (bool): Whether the tblr coordinates have been
normalized by the side length (wh) of prior bboxes.
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]]
and the length of max_shape should also be B.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
Return:
bboxes (Tensor): Boxes with shape (N, 4) or (B, N, 4)
"""
if not isinstance(normalizer, float):
normalizer = torch.tensor(normalizer, device=priors.device)
assert len(normalizer) == 4, 'Normalizer must have length = 4'
assert priors.size(0) == tblr.size(0)
if priors.ndim == 3:
assert priors.size(1) == tblr.size(1)
loc_decode = tblr * normalizer
prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2
if normalize_by_wh:
wh = priors[..., 2:4] - priors[..., 0:2]
w, h = torch.split(wh, 1, dim=-1)
# In-place operation with slice would fail when exporting to ONNX
th = h * loc_decode[..., :2] # tb
tw = w * loc_decode[..., 2:] # lr
loc_decode = torch.cat([th, tw], dim=-1)
top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1)
xmin = prior_centers[..., 0].unsqueeze(-1) - left
xmax = prior_centers[..., 0].unsqueeze(-1) + right
ymin = prior_centers[..., 1].unsqueeze(-1) - top
ymax = prior_centers[..., 1].unsqueeze(-1) + bottom
if clip_border and max_shape is not None:
from mmdeploy.codebase.mmdet.deploy import clip_bboxes
xmin, ymin, xmax, ymax = clip_bboxes(xmin, ymin, xmax, ymax, max_shape)
bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1).view(priors.size())
return bboxes | Rewrite `tblr2bboxes` for default backend. Since the need of clip op with dynamic min and max, this function uses clip_bboxes function to support dynamic shape. Args: ctx (ContextCaller): The context with additional information. priors (Tensor): Prior boxes in point form (x0, y0, x1, y1) Shape: (N,4) or (B, N, 4). tblr (Tensor): Coords of network output in tblr form Shape: (N, 4) or (B, N, 4). normalizer (Sequence[float] | float): Normalization parameter of encoded boxes. By list, it represents the normalization factors at tblr dims. By float, it is the unified normalization factor at all dims. Default: 4.0 normalize_by_wh (bool): Whether the tblr coordinates have been normalized by the side length (wh) of prior bboxes. max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Return: bboxes (Tensor): Boxes with shape (N, 4) or (B, N, 4) |
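A small numeric sanity check of the tblr decoding above, assuming normalizer=4.0 and normalize_by_wh=True (illustrative only, not part of mmdeploy):
import torch

prior_center = torch.tensor([5., 5.])                # prior centered at (5, 5)
wh = torch.tensor([10., 10.])                        # prior size 10 x 10
top, bottom, left, right = torch.tensor([0.1, 0.1, 0.2, 0.2]) * 4.0  # denormalize
top, bottom = top * wh[1], bottom * wh[1]            # scale tb by height
left, right = left * wh[0], right * wh[0]            # scale lr by width
box = torch.stack([prior_center[0] - left, prior_center[1] - top,
                   prior_center[0] + right, prior_center[1] + bottom])
print(box)  # tensor([-3.,  1., 13.,  9.])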
188,691 | from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from mmdet.structures.bbox import get_box_tensor
from mmengine import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
The provided code snippet includes necessary dependencies for implementing the `bbox_head__forward` function. Write a Python function `def bbox_head__forward(self, x)` to solve the following problem:
Rewrite `forward` for default backend. This function uses the specific `forward` function for the BBoxHead or ConvFCBBoxHead after adding marks. Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. x (Tensor): Input image tensor. Returns: tuple(Tensor, Tensor): The (cls_score, bbox_pred). The cls_score has shape (N, num_det, num_cls) and the bbox_pred has shape (N, num_det, 4).
Here is the function:
def bbox_head__forward(self, x):
"""Rewrite `forward` for default backend.
This function uses the specific `forward` function for the BBoxHead
or ConvFCBBoxHead after adding marks.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
x (Tensor): Input image tensor.
Returns:
tuple(Tensor, Tensor): The (cls_score, bbox_pred). The cls_score
has shape (N, num_det, num_cls) and the bbox_pred has shape
(N, num_det, 4).
"""
ctx = FUNCTION_REWRITER.get_context()
@mark(
'bbox_head_forward',
inputs=['bbox_feats'],
outputs=['cls_score', 'bbox_pred'])
def __forward(self, x):
return ctx.origin_func(self, x)
return __forward(self, x) | Rewrite `forward` for default backend. This function uses the specific `forward` function for the BBoxHead or ConvFCBBoxHead after adding marks. Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. x (Tensor): Input image tensor. Returns: tuple(Tensor, Tensor): The (cls_score, bbox_pred). The cls_score has shape (N, num_det, num_cls) and the bbox_pred has shape (N, num_det, 4). |
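A hedged sketch (not from the dataset) of the registration pattern these rewriters follow; the function path below is the usual mmdet location of BBoxHead.forward and is shown only for illustration:
from mmdeploy.core import FUNCTION_REWRITER

@FUNCTION_REWRITER.register_rewriter(
    'mmdet.models.roi_heads.bbox_heads.bbox_head.BBoxHead.forward')
def bbox_head__forward_example(self, x):
    ctx = FUNCTION_REWRITER.get_context()
    # Fall back to the original implementation; real rewriters change it.
    return ctx.origin_func(self, x)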
188,692 | from typing import List, Optional, Tuple
import torch
import torch.nn.functional as F
from mmdet.structures.bbox import get_box_tensor
from mmengine import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.mmcv.ops import multiclass_nms
The provided code snippet includes necessary dependencies for implementing the `bbox_head__predict_by_feat` function. Write a Python function `def bbox_head__predict_by_feat(self, rois: Tuple[Tensor], cls_scores: Tuple[Tensor], bbox_preds: Tuple[Tensor], batch_img_metas: List[dict], rcnn_test_cfg: Optional[ConfigDict] = None, rescale: bool = False) -> Tuple[Tensor]` to solve the following problem:
Rewrite `predict_by_feat` of `BBoxHead` for default backend. Transform network output for a batch into bbox predictions. Support `reg_class_agnostic == False` case. Args: rois (tuple[Tensor]): Tuple of boxes to be transformed. Each has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_scores (tuple[Tensor]): Tuple of box scores, each has shape (num_boxes, num_classes + 1). bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each has shape (num_boxes, num_classes * 4). batch_img_metas (list[dict]): List of image information. rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: - dets (Tensor): Classification bboxes and scores, has a shape (num_instance, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
Here is the function:
def bbox_head__predict_by_feat(self,
rois: Tuple[Tensor],
cls_scores: Tuple[Tensor],
bbox_preds: Tuple[Tensor],
batch_img_metas: List[dict],
rcnn_test_cfg: Optional[ConfigDict] = None,
rescale: bool = False) -> Tuple[Tensor]:
"""Rewrite `predict_by_feat` of `BBoxHead` for default backend.
Transform network output for a batch into bbox predictions. Support
`reg_class_agnostic == False` case.
Args:
rois (tuple[Tensor]): Tuple of boxes to be transformed.
Each has shape (num_boxes, 5). last dimension 5 arrange as
(batch_index, x1, y1, x2, y2).
cls_scores (tuple[Tensor]): Tuple of box scores, each has shape
(num_boxes, num_classes + 1).
bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each
has shape (num_boxes, num_classes * 4).
batch_img_metas (list[dict]): List of image information.
rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
- dets (Tensor): Classification bboxes and scores, has a shape
(num_instance, 5)
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
ctx = FUNCTION_REWRITER.get_context()
assert rois.ndim == 3, 'Only support exporting two-stage ' \
    'models to ONNX with batch dimension.'
img_shape = batch_img_metas[0]['img_shape']
if self.custom_cls_channels:
scores = self.loss_cls.get_activation(cls_scores)
else:
scores = F.softmax(
cls_scores, dim=-1) if cls_scores is not None else None
if bbox_preds is not None:
# num_classes = 1 if self.reg_class_agnostic else self.num_classes
# if num_classes > 1:
# rois = rois.repeat_interleave(num_classes, dim=1)
bboxes = self.bbox_coder.decode(
rois[..., 1:], bbox_preds, max_shape=img_shape)
bboxes = get_box_tensor(bboxes)
else:
bboxes = rois[..., 1:].clone()
if img_shape is not None:
max_shape = bboxes.new_tensor(img_shape)[..., :2]
min_xy = bboxes.new_tensor(0)
max_xy = torch.cat([max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2)
bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
batch_size = scores.shape[0]
device = scores.device
# ignore background class
scores = scores[..., :self.num_classes]
if not self.reg_class_agnostic:
# only keep boxes with the max scores
max_inds = scores.reshape(-1, self.num_classes).argmax(1, keepdim=True)
encode_size = self.bbox_coder.encode_size
bboxes = bboxes.reshape(-1, self.num_classes, encode_size)
dim0_inds = torch.arange(bboxes.shape[0], device=device).unsqueeze(-1)
bboxes = bboxes[dim0_inds, max_inds].reshape(batch_size, -1,
encode_size)
# get nms params
post_params = get_post_processing_params(ctx.cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = rcnn_test_cfg.nms.get('iou_threshold',
post_params.iou_threshold)
score_threshold = rcnn_test_cfg.get('score_thr',
post_params.score_threshold)
if torch.onnx.is_in_onnx_export():
pre_top_k = post_params.pre_top_k
else:
# For two stage partition post processing
pre_top_k = -1 if post_params.pre_top_k >= bboxes.shape[1] \
else post_params.pre_top_k
keep_top_k = rcnn_test_cfg.get('max_per_img', post_params.keep_top_k)
nms_type = rcnn_test_cfg.nms.get('type')
dets, labels = multiclass_nms(
bboxes,
scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k)
return dets, labels | Rewrite `predict_by_feat` of `BBoxHead` for default backend. Transform network output for a batch into bbox predictions. Support `reg_class_agnostic == False` case. Args: rois (tuple[Tensor]): Tuple of boxes to be transformed. Each has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_scores (tuple[Tensor]): Tuple of box scores, each has shape (num_boxes, num_classes + 1). bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each has shape (num_boxes, num_classes * 4). batch_img_metas (list[dict]): List of image information. rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: - dets (Tensor): Classification bboxes and scores, has a shape (num_instance, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). |
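The nms parameters read by get_post_processing_params above come from the deploy config. A hedged sketch of that section, with illustrative values that are assumptions rather than canonical defaults:
codebase_config = dict(
    type='mmdet',
    task='ObjectDetection',
    post_processing=dict(
        score_threshold=0.05,
        iou_threshold=0.5,
        max_output_boxes_per_class=200,
        pre_top_k=5000,
        keep_top_k=100))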
188,693 | import torch
from mmcv.ops import RoIAlign
from torch.autograd import Function
from mmdeploy.core.optimizers import mark
from mmdeploy.core.rewriters import FUNCTION_REWRITER
from mmdeploy.utils import get_backend
from mmdeploy.utils.constants import Backend
class MultiLevelRoiAlign(Function):
"""Create MMCVMultiLevelRoiAlign op.
This class is used to create a MultiLevelRoiAlign in ONNX for the TensorRT
backend.
"""
def symbolic(g, *args):
"""Symbolic function for creating onnx op."""
aligned = args[-1]
featmap_strides = args[-2]
finest_scale = args[-3]
roi_scale_factor = args[-4]
sampling_ratio = args[-5]
pool_mode = args[-6]
pool_mode_flag = 0 if pool_mode == 'max' else 1
output_size = args[-7]
inputs = args[:len(featmap_strides)]
rois = args[len(featmap_strides)]
return g.op(
'mmdeploy::MMCVMultiLevelRoiAlign',
rois,
*inputs,
output_height_i=output_size[1],
output_width_i=output_size[0],
pool_mode_i=pool_mode_flag,
sampling_ratio_i=sampling_ratio,
roi_scale_factor_f=roi_scale_factor,
finest_scale_i=finest_scale,
featmap_strides_f=featmap_strides,
aligned_i=aligned)
def forward(g, *args):
"""Run forward."""
# aligned = args[-1]
featmap_strides = args[-2]
# finest_scale = args[-3]
# roi_scale_factor = args[-4]
# sampling_ratio = args[-5]
output_size = args[-7]
inputs = args[:len(featmap_strides)]
rois = args[len(featmap_strides)]
num_proposals = rois.shape[0]
channel = inputs[0].shape[1]
return rois.new_zeros(
(num_proposals, channel, output_size[1], output_size[0]))
@FUNCTION_REWRITER.register_rewriter(
    'mmdet.models.roi_heads.roi_extractors.'
    'single_level_roi_extractor.SingleRoIExtractor.forward',
    backend='tensorrt')
The provided code snippet includes necessary dependencies for implementing the `single_roi_extractor__forward__tensorrt` function. Write a Python function `def single_roi_extractor__forward__tensorrt(self, feats, rois, roi_scale_factor=None)` to solve the following problem:
Rewrite `forward` of `SingleRoIExtractor` for TensorRT backend. This function uses MMCVMultiLevelRoiAlign op for TensorRT deployment.
Here is the function:
def single_roi_extractor__forward__tensorrt(self,
feats,
rois,
roi_scale_factor=None):
"""Rewrite `forward` of `SingleRoIExtractor` for TensorRT backend.
This function uses MMCVMultiLevelRoiAlign op for TensorRT deployment.
"""
featmap_strides = self.featmap_strides
finest_scale = self.finest_scale
for roi_layer in self.roi_layers:
assert isinstance(
roi_layer,
RoIAlign), f'{type(roi_layer)} is not supported in TensorRT.'
roi_layer = self.roi_layers[0]
out_size = roi_layer.output_size
sampling_ratio = roi_layer.sampling_ratio
pool_mode = roi_layer.pool_mode
aligned = roi_layer.aligned
if roi_scale_factor is None:
roi_scale_factor = 1.0
featmap_strides = [float(s) for s in featmap_strides]
return MultiLevelRoiAlign.apply(*feats, rois, out_size, pool_mode,
sampling_ratio, roi_scale_factor,
finest_scale, featmap_strides, aligned) | Rewrite `forward` of `SingleRoIExtractor` for TensorRT backend. This function uses MMCVMultiLevelRoiAlign op for TensorRT deployment. |
188,694 | import torch
from mmcv.ops import RoIAlign
from torch.autograd import Function
from mmdeploy.core.optimizers import mark
from mmdeploy.core.rewriters import FUNCTION_REWRITER
from mmdeploy.utils import get_backend
from mmdeploy.utils.constants import Backend
class AscendRoiExtractor(Function):
"""Create AscendRoiExtractor op.
This class is used to create a AscendRoiExtractor in ONNX for the Ascend
backend.
"""
def symbolic(g, *args):
"""Symbolic function for creating onnx op."""
aligned = args[-1]
featmap_strides = [1 / stride for stride in args[-2]]
finest_scale = args[-3]
roi_scale_factor = args[-4]
sampling_ratio = args[-5]
pool_mode = args[-6]
output_size = args[-7]
inputs = args[:len(featmap_strides)]
rois = args[len(featmap_strides)]
return g.op(
'mmdeploy::RoiExtractor',
*inputs,
rois,
pooled_height_i=output_size[1],
pooled_width_i=output_size[0],
pool_mode_s=pool_mode,
sample_num_i=sampling_ratio,
roi_scale_factor_f=roi_scale_factor,
finest_scale_i=finest_scale,
spatial_scale_f=featmap_strides,
aligned_i=aligned,
outputs=1)
def forward(ctx, *args):
"""Run forward."""
# aligned = args[-1]
featmap_strides = args[-2]
# finest_scale = args[-3]
# roi_scale_factor = args[-4]
# sampling_ratio = args[-5]
output_size = args[-7]
inputs = args[:len(featmap_strides)]
rois = args[len(featmap_strides)]
num_proposals = rois.shape[0]
channel = inputs[0].shape[1]
return rois.new_zeros(
(num_proposals, channel, output_size[1], output_size[0]))
@FUNCTION_REWRITER.register_rewriter(
    'mmdet.models.roi_heads.roi_extractors.'
    'single_level_roi_extractor.SingleRoIExtractor.forward',
    backend='ascend')
The provided code snippet includes necessary dependencies for implementing the `single_roi_extractor__forward__ascend` function. Write a Python function `def single_roi_extractor__forward__ascend(self, feats, rois, roi_scale_factor=None)` to solve the following problem:
Rewrite `forward` of `SingleRoIExtractor` for Ascend backend. This function uses RoiExtractor op for Ascend deployment.
Here is the function:
def single_roi_extractor__forward__ascend(self,
feats,
rois,
roi_scale_factor=None):
"""Rewrite `forward` of `SingleRoIExtractor` for Ascend backend.
This function uses RoiExtractor op for Ascend deployment.
"""
featmap_strides = self.featmap_strides
finest_scale = self.finest_scale
for roi_layer in self.roi_layers:
assert isinstance(
roi_layer,
RoIAlign), f'{type(roi_layer)} is not supported in Ascend.'
roi_layer = self.roi_layers[0]
out_size = roi_layer.output_size
sampling_ratio = roi_layer.sampling_ratio
pool_mode = roi_layer.pool_mode
aligned = roi_layer.aligned
if roi_scale_factor is None:
roi_scale_factor = 1.0
featmap_strides = [float(s) for s in featmap_strides]
return AscendRoiExtractor.apply(*feats, rois, out_size, pool_mode,
sampling_ratio, roi_scale_factor,
finest_scale, featmap_strides, aligned) | Rewrite `forward` of `SingleRoIExtractor` for Ascend backend. This function uses RoiExtractor op for Ascend deployment. |
188,695 | import torch
from mmcv.ops import RoIAlign
from torch.autograd import Function
from mmdeploy.core.optimizers import mark
from mmdeploy.core.rewriters import FUNCTION_REWRITER
from mmdeploy.utils import get_backend
from mmdeploy.utils.constants import Backend
class Backend(AdvancedEnum):
"""Define backend enumerations."""
PYTORCH = 'pytorch'
TENSORRT = 'tensorrt'
ONNXRUNTIME = 'onnxruntime'
PPLNN = 'pplnn'
NCNN = 'ncnn'
SNPE = 'snpe'
OPENVINO = 'openvino'
SDK = 'sdk'
TORCHSCRIPT = 'torchscript'
RKNN = 'rknn'
ASCEND = 'ascend'
COREML = 'coreml'
TVM = 'tvm'
VACC = 'vacc'
DEFAULT = 'default'
The provided code snippet includes necessary dependencies for implementing the `single_roi_extractor__forward` function. Write a Python function `def single_roi_extractor__forward(self, feats, rois, roi_scale_factor=None)` to solve the following problem:
Rewrite `forward` of SingleRoIExtractor for default backend. Rewrite this function to: 1. enable exporting to IR even though the input image contains no targets. Note that, `ScatterND` of onnx may conflict with `Reshape` if a tensor have a dim size of 0. Thus, we have to cat zeros to the dim 0 of `roi_feats` and recover back after all roi align finished. 2. this function adds mark for roi_extractor forward and remove unnecessary code of origin forward function when using ONNX as IR. 3. use the roi align in torhcvision to accelerate the inference.
Here is the function:
def single_roi_extractor__forward(self, feats, rois, roi_scale_factor=None):
"""Rewrite `forward` of SingleRoIExtractor for default backend.
Rewrite this function to:
1. enable exporting to IR even though the input
image contains no targets. Note that, `ScatterND` of onnx may conflict with
`Reshape` if a tensor has a dim size of 0. Thus, we have to cat zeros to
the dim 0 of `roi_feats` and recover back after all roi align finished.
2. this function adds mark for roi_extractor forward and remove
unnecessary code of origin forward function when using ONNX as IR.
3. use the roi align in torchvision to accelerate the inference.
"""
ctx = FUNCTION_REWRITER.get_context(
'mmdet.models.roi_heads.SingleRoIExtractor.forward')
backend = get_backend(ctx.cfg)
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(rois.shape[0], self.out_channels, *out_size)
if num_levels == 1:
assert len(rois) > 0, 'The number of rois should be positive'
if backend == Backend.TORCHSCRIPT or backend == Backend.COREML:
self.roi_layers[0].use_torchvision = True
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# concatenate zeros to rois and roi_feats for empty tensor cases
roi_feats = torch.cat(
(roi_feats.new_zeros(num_levels * 2,
*roi_feats.shape[-3:]), roi_feats))
rois = torch.cat((rois.new_zeros(num_levels * 2, 5), rois))
_tmp = torch.linspace(
0,
num_levels - 1,
num_levels,
dtype=target_lvls.dtype,
device=target_lvls.device)
target_lvls = torch.cat((_tmp, _tmp, target_lvls))
for i in range(num_levels):
mask = target_lvls == i
inds = mask.nonzero(as_tuple=False).squeeze(1)
rois_t = rois[inds]
# use the roi align in torchvision
if backend == Backend.TORCHSCRIPT or backend == Backend.COREML:
self.roi_layers[i].use_torchvision = True
roi_feats_t = self.roi_layers[i](feats[i], rois_t)
roi_feats[inds] = roi_feats_t
# slice to recover original size
roi_feats = roi_feats[num_levels * 2:]
return roi_feats | Rewrite `forward` of SingleRoIExtractor for default backend. Rewrite this function to: 1. enable exporting to IR even though the input image contains no targets. Note that, `ScatterND` of onnx may conflict with `Reshape` if a tensor have a dim size of 0. Thus, we have to cat zeros to the dim 0 of `roi_feats` and recover back after all roi align finished. 2. this function adds mark for roi_extractor forward and remove unnecessary code of origin forward function when using ONNX as IR. 3. use the roi align in torhcvision to accelerate the inference. |
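A toy illustration (not mmdeploy code) of the zero-padding trick used above: prepending dummy rois guarantees every per-level mask selects at least one row, so level-wise indexing never produces a 0-sized tensor during export. The real rewriter pads two dummies per level; one per level is enough to show the idea:
import torch

num_levels = 3
target_lvls = torch.tensor([2., 2.])                    # real rois only hit level 2
pad = torch.linspace(0, num_levels - 1, num_levels)     # one dummy roi per level
target_lvls = torch.cat((pad, target_lvls))
for i in range(num_levels):
    inds = (target_lvls == i).nonzero(as_tuple=False).squeeze(1)
    print(i, inds.numel())   # every level now selects >= 1 index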
188,696 | import torch
from mmcv.ops import RoIAlign
from torch.autograd import Function
from mmdeploy.core.optimizers import mark
from mmdeploy.core.rewriters import FUNCTION_REWRITER
from mmdeploy.utils import get_backend
from mmdeploy.utils.constants import Backend
class SingleRoIExtractorOpenVINO(Function):
"""This class adds support for ExperimentalDetectronROIFeatureExtractor
when exporting to OpenVINO.
The `forward` method returns the original output, which is calculated in
advance and added to the SingleRoIExtractorOpenVINO class. In addition, the
list of arguments is changed here to be more suitable for
ExperimentalDetectronROIFeatureExtractor.
"""
def __init__(self) -> None:
super().__init__()
def forward(g, output_size, featmap_strides, sample_num, rois, *feats):
"""Run forward."""
return SingleRoIExtractorOpenVINO.origin_output
def symbolic(g, output_size, featmap_strides, sample_num, rois, *feats):
"""Symbolic function for creating onnx op."""
from torch.onnx.symbolic_opset10 import _slice
rois = _slice(g, rois, axes=[1], starts=[1], ends=[5])
domain = 'org.openvinotoolkit'
op_name = 'ExperimentalDetectronROIFeatureExtractor'
roi_feats = g.op(
f'{domain}::{op_name}',
rois,
*feats,
output_size_i=output_size,
pyramid_scales_i=featmap_strides,
sampling_ratio_i=sample_num,
image_id_i=0,
distribute_rois_between_levels_i=1,
preserve_rois_order_i=0,
aligned_i=1,
outputs=1)
return roi_feats
@FUNCTION_REWRITER.register_rewriter(
    'mmdet.models.roi_heads.roi_extractors.'
    'single_level_roi_extractor.SingleRoIExtractor.forward',
    backend='openvino')
The provided code snippet includes necessary dependencies for implementing the `single_roi_extractor__forward__openvino` function. Write a Python function `def single_roi_extractor__forward__openvino(self, feats, rois, roi_scale_factor=None)` to solve the following problem:
Replaces SingleRoIExtractor with SingleRoIExtractorOpenVINO when exporting to OpenVINO. This function uses ExperimentalDetectronROIFeatureExtractor for OpenVINO.
Here is the function:
def single_roi_extractor__forward__openvino(self,
feats,
rois,
roi_scale_factor=None):
"""Replaces SingleRoIExtractor with SingleRoIExtractorOpenVINO when
exporting to OpenVINO.
This function uses ExperimentalDetectronROIFeatureExtractor for OpenVINO.
"""
ctx = FUNCTION_REWRITER.get_context()
# Adding original output to SingleRoIExtractorOpenVINO.
state = torch._C._get_tracing_state()
origin_output = ctx.origin_func(self, feats, rois, roi_scale_factor)
setattr(SingleRoIExtractorOpenVINO, 'origin_output', origin_output)
torch._C._set_tracing_state(state)
output_size = self.roi_layers[0].output_size[0]
featmap_strides = self.featmap_strides
sample_num = self.roi_layers[0].sampling_ratio
args = (output_size, featmap_strides, sample_num, rois, *feats)
result = SingleRoIExtractorOpenVINO.apply(*args)
return result | Replaces SingleRoIExtractor with SingleRoIExtractorOpenVINO when exporting to OpenVINO. This function uses ExperimentalDetectronROIFeatureExtractor for OpenVINO. |
188,697 | import torch
from mmcv.ops import RoIAlign
from torch.autograd import Function
from mmdeploy.core.optimizers import mark
from mmdeploy.core.rewriters import FUNCTION_REWRITER
from mmdeploy.utils import get_backend
from mmdeploy.utils.constants import Backend
class Backend(AdvancedEnum):
"""Define backend enumerations."""
PYTORCH = 'pytorch'
TENSORRT = 'tensorrt'
ONNXRUNTIME = 'onnxruntime'
PPLNN = 'pplnn'
NCNN = 'ncnn'
SNPE = 'snpe'
OPENVINO = 'openvino'
SDK = 'sdk'
TORCHSCRIPT = 'torchscript'
RKNN = 'rknn'
ASCEND = 'ascend'
COREML = 'coreml'
TVM = 'tvm'
VACC = 'vacc'
DEFAULT = 'default'
The provided code snippet includes necessary dependencies for implementing the `single_roi_extractor__forward__coreml` function. Write a Python function `def single_roi_extractor__forward__coreml(self, feats, rois, roi_scale_factor=None)` to solve the following problem:
Rewrite `forward` of SingleRoIExtractor for coreml.
Here is the function:
def single_roi_extractor__forward__coreml(self,
feats,
rois,
roi_scale_factor=None):
"""Rewrite `forward` of SingleRoIExtractor for coreml."""
ctx = FUNCTION_REWRITER.get_context()
backend = get_backend(ctx.cfg)
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(rois.shape[0], self.out_channels, *out_size)
if num_levels == 1:
assert len(rois) > 0, 'The number of rois should be positive'
self.roi_layers[0].use_torchvision = True
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
# inds = mask.nonzero(as_tuple=False).squeeze(1)
rois_t = rois * mask.unsqueeze(-1)
# use the roi align in torchvision
if backend == Backend.COREML:
self.roi_layers[i].use_torchvision = True
roi_feats_t = self.roi_layers[i](feats[i], rois_t)
roi_feats = roi_feats + roi_feats_t * (rois_t[:, -1] > 0).reshape(
-1, 1, 1, 1)
# slice to recover original size
return roi_feats | Rewrite `forward` of SingleRoIExtractor for coreml. |
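An illustrative sketch of the gather-free trick used above for Core ML: instead of indexing rows per level, zero out contributions that do not belong to the level and sum the masked per-level outputs. Names and values below are made up for the example:
import torch

feats_per_level = [torch.ones(3, 1) * (i + 1) for i in range(2)]   # fake per-level outputs
target_lvls = torch.tensor([0, 1, 1])
out = torch.zeros(3, 1)
for i, f in enumerate(feats_per_level):
    keep = (target_lvls == i).float().view(-1, 1)
    out = out + f * keep
print(out.squeeze())  # tensor([1., 2., 2.])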
188,698 | from typing import List, Tuple
import torch
from mmdet.utils import ConfigType
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `standard_roi_head__predict_bbox` function. Write a Python function `def standard_roi_head__predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: List[Tensor], rcnn_test_cfg: ConfigType, rescale: bool = False) -> List[Tensor]` to solve the following problem:
Rewrite `predict_bbox` of `StandardRoIHead` for default backend. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[Tensor]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - dets (Tensor): Classification bboxes and scores, has a shape (num_instance, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
Here is the function:
def standard_roi_head__predict_bbox(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
rpn_results_list: List[Tensor],
rcnn_test_cfg: ConfigType,
rescale: bool = False) -> List[Tensor]:
"""Rewrite `predict_bbox` of `StandardRoIHead` for default backend.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[Tensor]): List of region
proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[Tensor]: Detection results of each image
after the post process.
Each item usually contains following keys.
- dets (Tensor): Classification bboxes and scores, has a shape
(num_instance, 5)
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
rois = rpn_results_list[0]
rois_dims = int(rois.shape[-1])
batch_index = torch.arange(
rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand(
rois.size(0), rois.size(1), 1)
rois = torch.cat([batch_index, rois[..., :rois_dims - 1]], dim=-1)
batch_size = rois.shape[0]
num_proposals_per_img = rois.shape[1]
# Eliminate the batch dimension
rois = rois.view(-1, rois_dims)
bbox_results = self._bbox_forward(x, rois)
cls_scores = bbox_results['cls_score']
bbox_preds = bbox_results['bbox_pred']
# Recover the batch dimension
rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))
cls_scores = cls_scores.reshape(batch_size, num_proposals_per_img,
cls_scores.size(-1))
bbox_preds = bbox_preds.reshape(batch_size, num_proposals_per_img,
bbox_preds.size(-1))
result_list = self.bbox_head.predict_by_feat(
rois=rois,
cls_scores=cls_scores,
bbox_preds=bbox_preds,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=rcnn_test_cfg,
rescale=rescale)
return result_list | Rewrite `predict_bbox` of `StandardRoIHead` for default backend. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[Tensor]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - dets (Tensor): Classification bboxes and scores, has a shape (num_instance, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). |
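An illustrative-only sketch of how a batch index is prepended to proposals above, turning (B, N, 4) boxes into the flat (B*N, 5) rois that the bbox forward expects:
import torch

proposals = torch.rand(2, 3, 4)                           # (B=2, N=3, 4)
batch_index = torch.arange(2.).view(-1, 1, 1).expand(2, 3, 1)
rois = torch.cat([batch_index, proposals], dim=-1)        # (2, 3, 5)
rois = rois.view(-1, 5)                                   # (6, 5); column 0 is the image id
print(rois[:, 0])  # tensor([0., 0., 0., 1., 1., 1.])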
188,699 | from typing import List, Tuple
import torch
from mmdet.utils import ConfigType
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `standard_roi_head__predict_mask` function. Write a Python function `def standard_roi_head__predict_mask(self, x: Tuple[Tensor], batch_img_metas: List[dict], results_list: List[Tensor], rescale: bool = False) -> List[Tensor]` to solve the following problem:
Perform forward propagation of the mask head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. results_list (list[:obj:`InstanceData`]): Detection results of each image. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, H, W).
Here is the function:
def standard_roi_head__predict_mask(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
results_list: List[Tensor],
rescale: bool = False) -> List[Tensor]:
"""Perform forward propagation of the mask head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[Tensor]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
dets, det_labels = results_list
batch_size = dets.size(0)
det_bboxes = dets[..., :4]
# expand might lead to static shape, use broadcast instead
batch_index = torch.arange(
det_bboxes.size(0), device=det_bboxes.device).float().view(
-1, 1, 1) + det_bboxes.new_zeros(
(det_bboxes.size(0), det_bboxes.size(1))).unsqueeze(-1)
mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
mask_rois = mask_rois.view(-1, 5)
mask_results = self._mask_forward(x, mask_rois)
mask_preds = mask_results['mask_preds']
num_det = det_bboxes.shape[1]
segm_results = self.mask_head.predict_by_feat(
mask_preds,
results_list,
batch_img_metas,
self.test_cfg,
rescale=rescale)
segm_results = segm_results.reshape(batch_size, num_det,
segm_results.shape[-2],
segm_results.shape[-1])
return dets, det_labels, segm_results | Perform forward propagation of the mask head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. results_list (list[:obj:`InstanceData`]): Detection results of each image. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, H, W). |
188,700 | from typing import List, Tuple
import torch
import torch.nn.functional as F
from mmengine import ConfigDict
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend, get_backend
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
"""Paste instance masks according to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
boxes (Tensor): N, 4
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
skip_empty (bool): Only paste masks within the region that
tightly bound all boxes, and returns the results this region only.
An important optimization for CPU.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
If skip_empty == False, the whole image will be pasted. It will
return a mask of shape (N, img_h, img_w) and an empty tuple.
If skip_empty == True, only area around the mask will be pasted.
A mask of shape (N, h', w') and its start and end coordinates
in the original image will be returned.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
if skip_empty:
x0_int, y0_int = torch.clamp(
boxes.min(dim=0).values.floor()[:2] - 1,
min=0).to(dtype=torch.int32)
x1_int = torch.clamp(
boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(
boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
N = masks.shape[0]
img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
if skip_empty:
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
The provided code snippet includes necessary dependencies for implementing the `fcn_mask_head__predict_by_feat` function. Write a Python function `def fcn_mask_head__predict_by_feat(self, mask_preds: Tuple[Tensor], results_list: List[Tensor], batch_img_metas: List[dict], rcnn_test_cfg: ConfigDict, rescale: bool = False, activate_map: bool = False) -> List[Tensor]` to solve the following problem:
Transform a batch of output features extracted from the head into mask results. Args: mask_preds (tuple[Tensor]): Tuple of predicted foreground masks, each has shape (n, num_classes, h, w). results_list (list[Tensor]): Detection results of each image. batch_img_metas (list[dict]): List of image information. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. rescale (bool): If True, return boxes in original image space. Defaults to False. activate_map (book): Whether get results with augmentations test. If True, the `mask_preds` will not process with sigmoid. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - dets (Tensor): Classification scores, has a shape (num_instance, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - masks (Tensor): Has a shape (num_instances, H, W).
Here is the function:
def fcn_mask_head__predict_by_feat(self,
mask_preds: Tuple[Tensor],
results_list: List[Tensor],
batch_img_metas: List[dict],
rcnn_test_cfg: ConfigDict,
rescale: bool = False,
activate_map: bool = False) -> List[Tensor]:
"""Transform a batch of output features extracted from the head into mask
results.
Args:
mask_preds (tuple[Tensor]): Tuple of predicted foreground masks,
each has shape (n, num_classes, h, w).
results_list (list[Tensor]): Detection results of
each image.
batch_img_metas (list[dict]): List of image information.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
activate_map (bool): Whether to get results with augmentation test.
If True, the `mask_preds` will not be processed with sigmoid.
Defaults to False.
Returns:
list[Tensor]: Detection results of each image
after the post process. Each item usually contains following keys.
- dets (Tensor): Classification scores, has a shape
(num_instance, 5)
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
ctx = FUNCTION_REWRITER.get_context()
ori_shape = batch_img_metas[0]['img_shape']
dets, det_labels = results_list
dets = dets.view(-1, 5)
det_labels = det_labels.view(-1)
backend = get_backend(ctx.cfg)
mask_preds = mask_preds.sigmoid()
bboxes = dets[:, :4]
labels = det_labels
threshold = rcnn_test_cfg.mask_thr_binary
if not self.class_agnostic:
box_inds = torch.arange(mask_preds.shape[0], device=bboxes.device)
mask_pred = mask_preds[box_inds, labels][:, None]
# grid sample is not supported by most engines
# so we add a flag to disable it.
mmdet_params = get_post_processing_params(ctx.cfg)
export_postprocess_mask = mmdet_params.get('export_postprocess_mask',
False)
if not export_postprocess_mask:
return mask_pred
masks, _ = _do_paste_mask(
mask_pred, bboxes, ori_shape[0], ori_shape[1], skip_empty=False)
if backend == Backend.TENSORRT:
return masks
if threshold >= 0:
masks = (masks >= threshold).to(dtype=torch.bool)
return masks | Transform a batch of output features extracted from the head into mask results. Args: mask_preds (tuple[Tensor]): Tuple of predicted foreground masks, each has shape (n, num_classes, h, w). results_list (list[Tensor]): Detection results of each image. batch_img_metas (list[dict]): List of image information. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. rescale (bool): If True, return boxes in original image space. Defaults to False. activate_map (book): Whether get results with augmentations test. If True, the `mask_preds` will not process with sigmoid. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - dets (Tensor): Classification scores, has a shape (num_instance, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - masks (Tensor): Has a shape (num_instances, H, W). |
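A small, assumption-laden sketch of the coordinate normalization inside _do_paste_mask above: pixel centers of the target image are mapped into the [-1, 1] range of the box so grid_sample can resample the fixed-size mask prediction. The box extent below is arbitrary:
import torch

x0, x1 = 2.0, 6.0                                    # box extent along x
img_x = torch.arange(0, 8, dtype=torch.float32) + 0.5
gx = (img_x - x0) / (x1 - x0) * 2 - 1                # -1 at x0, +1 at x1
print(gx)   # values outside [-1, 1] fall outside the box and sample to zero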
188,701 | from typing import List, Tuple
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
def htc_roi_head__predict_mask(self,
x: Tuple[Tensor],
semantic_heat: Tensor,
batch_img_metas: List[dict],
results_list: List[Tensor],
rescale: bool = False) -> List[Tensor]:
dets, det_labels = results_list
batch_size = dets.size(0)
det_bboxes = dets[..., :4]
batch_index = torch.arange(
det_bboxes.size(0),
device=det_bboxes.device).float().view(-1, 1, 1).expand(
det_bboxes.size(0), det_bboxes.size(1), 1)
mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
mask_rois = mask_rois.view(-1, 5)
mask_results = self._mask_forward(
stage=-1,
x=x,
rois=mask_rois,
semantic_feat=semantic_heat,
training=False)
mask_preds = mask_results['mask_preds'][0]
num_det = det_bboxes.shape[1]
segm_results = self.mask_head[-1].predict_by_feat(
mask_preds,
results_list,
batch_img_metas,
self.test_cfg,
rescale=rescale)
segm_results = segm_results.reshape(batch_size, num_det,
segm_results.shape[-2],
segm_results.shape[-1])
return dets, det_labels, segm_results | null |
188,702 | from typing import List, Tuple
import torch
from mmdet.structures.bbox import get_box_tensor
from mmdet.utils import ConfigType
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `cascade_roi_head__predict_bbox` function. Write a Python function `def cascade_roi_head__predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: List[Tensor], rcnn_test_cfg: ConfigType, rescale: bool = False, **kwargs) -> List[Tensor]` to solve the following problem:
Rewrite `predict_bbox` of `CascadeRoIHead` for default backend. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[Tensor]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - dets (Tensor): Classification bboxes and scores, has a shape (num_instance, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
Here is the function:
def cascade_roi_head__predict_bbox(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
rpn_results_list: List[Tensor],
rcnn_test_cfg: ConfigType,
rescale: bool = False,
**kwargs) -> List[Tensor]:
"""Rewrite `predict_bbox` of `CascadeRoIHead` for default backend.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[Tensor]): List of region
proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[Tensor]: Detection results of each image
after the post process.
Each item usually contains following keys.
- dets (Tensor): Classification bboxes and scores, has a shape
(num_instance, 5)
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
rois = rpn_results_list[0]
rois_dims = rois.shape[-1]
batch_index = torch.arange(
rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand(
rois.size(0), rois.size(1), 1)
rois = torch.cat([batch_index, rois[..., :rois_dims - 1]], dim=-1)
batch_size = rois.shape[0]
num_proposals_per_img = rois.shape[1]
# Eliminate the batch dimension
rois = rois.view(-1, rois_dims)
ms_scores = []
max_shape = batch_img_metas[0]['img_shape']
for i in range(self.num_stages):
bbox_results = self._bbox_forward(i, x, rois, **kwargs)
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Recover the batch dimension
rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))
cls_score = cls_score.reshape(batch_size, num_proposals_per_img,
cls_score.size(-1))
bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, -1)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
assert self.bbox_head[i].reg_class_agnostic
new_rois = self.bbox_head[i].bbox_coder.decode(
rois[..., 1:], bbox_pred, max_shape=max_shape)
new_rois = get_box_tensor(new_rois)
rois = new_rois.reshape(-1, new_rois.shape[-1])
# Add dummy batch index
rois = torch.cat([batch_index.flatten(0, 1), rois], dim=-1)
cls_scores = sum(ms_scores) / float(len(ms_scores))
bbox_preds = bbox_pred.reshape(batch_size, num_proposals_per_img, -1)
rois = rois.reshape(batch_size, num_proposals_per_img, -1)
result_list = self.bbox_head[-1].predict_by_feat(
rois=rois,
cls_scores=cls_scores,
bbox_preds=bbox_preds,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=rcnn_test_cfg,
rescale=rescale)
return result_list | Rewrite `predict_bbox` of `CascadeRoIHead` for default backend. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[Tensor]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - dets (Tensor): Classification bboxes and scores, has a shape (num_instance, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). |
188,703 | from typing import List, Tuple
import torch
from mmdet.structures.bbox import get_box_tensor
from mmdet.utils import ConfigType
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `cascade_roi_head__predict_mask` function. Write a Python function `def cascade_roi_head__predict_mask(self, x: Tuple[Tensor], batch_img_metas: List[dict], results_list: List[Tensor], rescale: bool = False) -> List[Tensor]` to solve the following problem:
Perform forward propagation of the mask head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. results_list (list[Tensor]): Detection results of each image. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, H, W).
Here is the function:
def cascade_roi_head__predict_mask(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
results_list: List[Tensor],
rescale: bool = False) -> List[Tensor]:
"""Perform forward propagation of the mask head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
results_list (list[Tensor]): Detection results of
each image.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[Tensor]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
dets, det_labels = results_list
batch_size = dets.size(0)
det_bboxes = dets[..., :4]
batch_index = torch.arange(
det_bboxes.size(0),
device=det_bboxes.device).float().view(-1, 1, 1).expand(
det_bboxes.size(0), det_bboxes.size(1), 1)
mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
mask_rois = mask_rois.view(-1, 5)
aug_masks = []
for i in range(self.num_stages):
mask_results = self._mask_forward(i, x, mask_rois)
mask_pred = mask_results['mask_preds']
aug_masks.append(mask_pred)
mask_preds = sum(aug_masks) / len(aug_masks)
num_det = det_bboxes.shape[1]
segm_results = self.mask_head[-1].predict_by_feat(
mask_preds,
results_list,
batch_img_metas,
self.test_cfg,
rescale=rescale)
segm_results = segm_results.reshape(batch_size, num_det,
segm_results.shape[-2],
segm_results.shape[-1])
return dets, det_labels, segm_results | Perform forward propagation of the mask head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. results_list (list[Tensor]): Detection results of each image. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). - masks (Tensor): Has a shape (num_instances, H, W). |
188,704 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend, get_root_logger
def l2norm__forward__default(self, x):
"""Default rewriter for l2norm.
    Implement with functional.normalize.
"""
return torch.nn.functional.normalize(
x, dim=1) * self.weight[None, :, None, None]
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdet.models.necks.ssd_neck.L2Norm.forward',
    backend=Backend.TENSORRT.value)
The provided code snippet includes necessary dependencies for implementing the `l2norm__forward__tensorrt` function. Write a Python function `def l2norm__forward__tensorrt(self, x)` to solve the following problem:
rewrite `l2norm` for TensorRT. TensorRT7 does not support dynamic clamp, which is used in normalize.
Here is the function:
def l2norm__forward__tensorrt(self, x):
"""rewrite `l2norm` for TensorRT.
TensorRT7 does not support dynamic clamp, which is used in normalize.
"""
ctx = FUNCTION_REWRITER.get_context()
logger = get_root_logger()
trt_version_major = 8
try:
import tensorrt as trt
from packaging import version
trt_version = version.parse(trt.__version__)
trt_version_major = trt_version.major
except Exception:
logger.warning('Can not get TensorRT version.')
if trt_version_major >= 8:
return l2norm__forward__default(self, x)
else:
return ctx.origin_func(self, x) | rewrite `l2norm` for TensorRT. TensorRT7 does not support dynamic clamp, which is used in normalize. |
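For context, a registered rewriter such as the ones above only takes effect inside a rewriter context during export. A minimal sketch, assuming `model`, `dummy_input` and a TensorRT `deploy_cfg` are already prepared (all three are placeholders, not part of the entry above):
import torch
from mmdeploy.core import RewriterContext

# Inside the context, L2Norm.forward resolves to the TensorRT-specific
# rewriter registered above; outside it, the original forward is used.
with RewriterContext(cfg=deploy_cfg, backend='tensorrt'), torch.no_grad():
    torch.onnx.export(model, dummy_input, 'ssd.onnx', opset_version=11)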
188,705 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `maskformer__forward` function. Write a Python function `def maskformer__forward(self, batch_inputs, data_samples, mode='tensor', **kwargs)` to solve the following problem:
Rewrite `forward` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool): Whether to rescale the results. Defaults to True. Returns: tuple[Tensor, Tensor, Tensor, Tensor]: (bboxes, labels, masks, semseg), `bboxes` of shape [N, num_det, 5], `labels` of shape [N, num_det], `masks` of shape [N, roi_H, roi_W], `semseg` of shape [N, num_sem_class, sem_H, sem_W].
Here is the function:
def maskformer__forward(self,
batch_inputs,
data_samples,
mode='tensor',
**kwargs):
"""Rewrite `forward` for default backend. Support configured dynamic/static
shape for model input and return detection result as Tensor instead of
numpy array.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
tuple[Tensor, Tensor, Tensor, Tensor]:
(bboxes, labels, masks, semseg), `bboxes` of shape [N, num_det, 5],
`labels` of shape [N, num_det], `masks` of shape [N, roi_H, roi_W],
`semseg` of shape [N, num_sem_class, sem_H, sem_W].
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
# get origin input shape as tensor to support onnx dynamic shape
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
img_shape = torch._shape_as_tensor(batch_inputs)[2:]
if not is_dynamic_flag:
img_shape = [int(val) for val in img_shape]
# set the metainfo
# note that we can not use `set_metainfo`, deepcopy would crash the
# onnx trace.
for data_sample in data_samples:
data_sample.set_field(
name='img_shape', value=img_shape, field_type='metainfo')
data_sample.set_field(
name='batch_input_shape', value=img_shape, field_type='metainfo')
feats = self.extract_feat(batch_inputs)
mask_cls_results, mask_pred_results = self.panoptic_head.predict(
feats, data_samples)
# do not export panoptic_fusion_head
return mask_cls_results, mask_pred_results | Rewrite `forward` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool): Whether to rescale the results. Defaults to True. Returns: tuple[Tensor, Tensor, Tensor, Tensor]: (bboxes, labels, masks, semseg), `bboxes` of shape [N, num_det, 5], `labels` of shape [N, num_det], `masks` of shape [N, roi_H, roi_W], `semseg` of shape [N, num_sem_class, sem_H, sem_W]. |
188,706 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `two_stage_panoptic_segmentor__forward` function. Write a Python function `def two_stage_panoptic_segmentor__forward(self, batch_inputs, data_samples, mode='tensor', **kwargs)` to solve the following problem:
Rewrite `forward` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool): Whether to rescale the results. Defaults to True. Returns: tuple[Tensor, Tensor, Tensor, Tensor]: (bboxes, labels, masks, semseg), `bboxes` of shape [N, num_det, 5], `labels` of shape [N, num_det], `masks` of shape [N, roi_H, roi_W], `semseg` of shape [N, num_sem_class, sem_H, sem_W].
Here is the function:
def two_stage_panoptic_segmentor__forward(self,
batch_inputs,
data_samples,
mode='tensor',
**kwargs):
"""Rewrite `forward` for default backend. Support configured dynamic/static
shape for model input and return detection result as Tensor instead of
numpy array.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
tuple[Tensor, Tensor, Tensor, Tensor]:
(bboxes, labels, masks, semseg), `bboxes` of shape [N, num_det, 5],
`labels` of shape [N, num_det], `masks` of shape [N, roi_H, roi_W],
`semseg` of shape [N, num_sem_class, sem_H, sem_W].
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
# get origin input shape as tensor to support onnx dynamic shape
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
img_shape = torch._shape_as_tensor(batch_inputs)[2:].to(
batch_inputs.device)
if not is_dynamic_flag:
img_shape = [int(val) for val in img_shape]
# set the metainfo
# note that we can not use `set_metainfo`, deepcopy would crash the
# onnx trace.
for data_sample in data_samples:
data_sample.set_field(
name='img_shape', value=img_shape, field_type='metainfo')
data_sample.set_field(
name='batch_input_shape', value=img_shape, field_type='metainfo')
img_metas = [data_samples.metainfo for data_samples in data_samples]
x = self.extract_feat(batch_inputs)
if data_samples[0].get('proposals', None) is None:
proposals = self.rpn_head.predict(x, data_samples, rescale=False)
else:
proposals = [data_sample.proposals for data_sample in data_samples]
bboxes, labels, masks = self.roi_head.predict(
x, proposals, data_samples, rescale=False)
semseg = self.semantic_head.predict(x, img_metas, rescale=False)
# do not export panoptic_fusion_head
return bboxes, labels, masks, semseg | Rewrite `forward` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool): Whether to rescale the results. Defaults to True. Returns: tuple[Tensor, Tensor, Tensor, Tensor]: (bboxes, labels, masks, semseg), `bboxes` of shape [N, num_det, 5], `labels` of shape [N, num_det], `masks` of shape [N, roi_H, roi_W], `semseg` of shape [N, num_sem_class, sem_H, sem_W]. |
188,707 | import copy
import torch
from mmdet.models.detectors.base import ForwardResults
from mmdet.structures import DetDataSample
from mmdet.structures.det_data_sample import OptSampleList
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import is_dynamic_shape
def __predict_impl(self, batch_inputs, data_samples, rescale):
"""Rewrite and adding mark for `predict`.
Encapsulate this function for rewriting `predict` of DetectionTransformer.
1. Add mark for DetectionTransformer.
2. Support both dynamic and static export to onnx.
"""
img_feats = self.extract_feat(batch_inputs)
head_inputs_dict = self.forward_transformer(img_feats, data_samples)
results_list = self.bbox_head.predict(
**head_inputs_dict, rescale=rescale, batch_data_samples=data_samples)
return results_list
def _set_metainfo(data_samples, img_shape):
"""Set the metainfo.
Code in this function cannot be traced by fx.
"""
# fx can not trace deepcopy correctly
data_samples = copy.deepcopy(data_samples)
if data_samples is None:
data_samples = [DetDataSample()]
# note that we can not use `set_metainfo`, deepcopy would crash the
# onnx trace.
for data_sample in data_samples:
data_sample.set_field(
name='img_shape', value=img_shape, field_type='metainfo')
return data_samples
@FUNCTION_REWRITER.register_rewriter(
    'mmdet.models.detectors.base_detr.DetectionTransformer.forward')
The provided code snippet includes necessary dependencies for implementing the `detection_transformer__forward` function. Write a Python function `def detection_transformer__forward(self, batch_inputs: torch.Tensor, data_samples: OptSampleList = None, rescale: bool = True, **kwargs) -> ForwardResults` to solve the following problem:
Rewrite `predict` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (Boolean): rescale result or not. Returns: tuple[Tensor]: Detection results of the input images. - dets (Tensor): Classification bboxes and scores. Has a shape (num_instances, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
Here is the function:
def detection_transformer__forward(self,
batch_inputs: torch.Tensor,
data_samples: OptSampleList = None,
rescale: bool = True,
**kwargs) -> ForwardResults:
"""Rewrite `predict` for default backend.
Support configured dynamic/static shape for model input and return
detection result as Tensor instead of numpy array.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (Boolean): rescale result or not.
Returns:
tuple[Tensor]: Detection results of the
input images.
- dets (Tensor): Classification bboxes and scores.
Has a shape (num_instances, 5)
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
# get origin input shape as tensor to support onnx dynamic shape
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
img_shape = torch._shape_as_tensor(batch_inputs)[2:].to(
batch_inputs.device)
if not is_dynamic_flag:
img_shape = [int(val) for val in img_shape]
# set the metainfo
data_samples = _set_metainfo(data_samples, img_shape)
return __predict_impl(self, batch_inputs, data_samples, rescale) | Rewrite `predict` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (Boolean): rescale result or not. Returns: tuple[Tensor]: Detection results of the input images. - dets (Tensor): Classification bboxes and scores. Has a shape (num_instances, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). |
188,708 | import torch
from mmdet.models.detectors.base import ForwardResults
from mmdet.structures import DetDataSample
from mmdet.structures.det_data_sample import OptSampleList
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import is_dynamic_shape
def _set_metainfo(data_samples, img_shape):
    """Set the metainfo. Code in this function cannot be traced by fx."""
    if data_samples is None:
        data_samples = [DetDataSample()]
    # note that we can not use `set_metainfo`, deepcopy would crash the
    # onnx trace.
    for data_sample in data_samples:
        data_sample.set_field(
            name='img_shape', value=img_shape, field_type='metainfo')
    return data_samples
The provided code snippet includes necessary dependencies for implementing the `single_stage_detector__forward` function. Write a Python function `def single_stage_detector__forward(self, batch_inputs: torch.Tensor, data_samples: OptSampleList = None, mode: str = 'tensor', **kwargs) -> ForwardResults` to solve the following problem:
Rewrite `forward` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. mode (str): export mode, not used. Returns: tuple[Tensor]: Detection results of the input images. - dets (Tensor): Classification bboxes and scores. Has a shape (num_instances, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
Here is the function:
def single_stage_detector__forward(self,
batch_inputs: torch.Tensor,
data_samples: OptSampleList = None,
mode: str = 'tensor',
**kwargs) -> ForwardResults:
"""Rewrite `forward` for default backend.
Support configured dynamic/static shape for model input and return
detection result as Tensor instead of numpy array.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
mode (str): export mode, not used.
Returns:
tuple[Tensor]: Detection results of the
input images.
- dets (Tensor): Classification bboxes and scores.
Has a shape (num_instances, 5)
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
# get origin input shape as tensor to support onnx dynamic shape
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
img_shape = torch._shape_as_tensor(batch_inputs)[2:]
if not is_dynamic_flag:
img_shape = [int(val) for val in img_shape]
# set the metainfo
data_samples = _set_metainfo(data_samples, img_shape)
return __forward_impl(self, batch_inputs, data_samples=data_samples) | Rewrite `forward` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. mode (str): export mode, not used. Returns: tuple[Tensor]: Detection results of the input images. - dets (Tensor): Classification bboxes and scores. Has a shape (num_instances, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). |
188,709 | import torch
from mmdet.models.detectors.base import ForwardResults
from mmdet.structures.det_data_sample import OptSampleList
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `two_stage_detector__extract_feat` function. Write a Python function `def two_stage_detector__extract_feat(self, img)` to solve the following problem:
Rewrite `extract_feat` for default backend. This function uses the specific `extract_feat` function for the two stage detector after adding marks. Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. img (Tensor | List[Tensor]): Input image tensor(s). Returns: list[Tensor]: Each item with shape (N, C, H, W) corresponds one level of backbone and neck features.
Here is the function:
def two_stage_detector__extract_feat(self, img):
"""Rewrite `extract_feat` for default backend.
This function uses the specific `extract_feat` function for the two
stage detector after adding marks.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
img (Tensor | List[Tensor]): Input image tensor(s).
Returns:
list[Tensor]: Each item with shape (N, C, H, W) corresponds one
level of backbone and neck features.
"""
ctx = FUNCTION_REWRITER.get_context()
@mark('extract_feat', inputs='img', outputs='feat')
def __extract_feat_impl(self, img):
return ctx.origin_func(self, img)
return __extract_feat_impl(self, img) | Rewrite `extract_feat` for default backend. This function uses the specific `extract_feat` function for the two stage detector after adding marks. Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. img (Tensor | List[Tensor]): Input image tensor(s). Returns: list[Tensor]: Each item with shape (N, C, H, W) corresponds one level of backbone and neck features. |
188,710 | import torch
from mmdet.models.detectors.base import ForwardResults
from mmdet.structures.det_data_sample import OptSampleList
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `two_stage_detector__forward` function. Write a Python function `def two_stage_detector__forward(self, batch_inputs: torch.Tensor, data_samples: OptSampleList = None, mode: str = 'tensor', **kwargs) -> ForwardResults` to solve the following problem:
Rewrite `forward` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. mode (str): export mode, not used. Returns: tuple[Tensor]: Detection results of the input images. - dets (Tensor): Classification bboxes and scores. Has a shape (num_instances, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
Here is the function:
def two_stage_detector__forward(self,
batch_inputs: torch.Tensor,
data_samples: OptSampleList = None,
mode: str = 'tensor',
**kwargs) -> ForwardResults:
"""Rewrite `forward` for default backend.
Support configured dynamic/static shape for model input and return
detection result as Tensor instead of numpy array.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
mode (str): export mode, not used.
Returns:
tuple[Tensor]: Detection results of the
input images.
- dets (Tensor): Classification bboxes and scores.
Has a shape (num_instances, 5)
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
# get origin input shape as tensor to support onnx dynamic shape
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
img_shape = torch._shape_as_tensor(batch_inputs)[2:]
if not is_dynamic_flag:
img_shape = [int(val) for val in img_shape]
# set the metainfo
# note that we can not use `set_metainfo`, deepcopy would crash the
# onnx trace.
for data_sample in data_samples:
data_sample.set_field(
name='img_shape', value=img_shape, field_type='metainfo')
x = self.extract_feat(batch_inputs)
if data_samples[0].get('proposals', None) is None:
rpn_results_list = self.rpn_head.predict(
x, data_samples, rescale=False)
else:
rpn_results_list = [
data_sample.proposals for data_sample in data_samples
]
output = self.roi_head.predict(
x, rpn_results_list, data_samples, rescale=False)
return output | Rewrite `forward` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. mode (str): export mode, not used. Returns: tuple[Tensor]: Detection results of the input images. - dets (Tensor): Classification bboxes and scores. Has a shape (num_instances, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). |
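The dynamic/static branch shared by these `forward` rewrites reduces to the following behaviour. A standalone sketch; in the rewriters `is_dynamic_flag` comes from `is_dynamic_shape(deploy_cfg)` rather than being hard-coded:
import torch

batch_inputs = torch.rand(1, 3, 320, 416)
img_shape = torch._shape_as_tensor(batch_inputs)[2:]  # tensor([320, 416])
is_dynamic_flag = False  # static export
if not is_dynamic_flag:
    # bake the shape in as Python ints so it is constant-folded in the
    # exported graph; keep it as a tensor for dynamic-shape export
    img_shape = [int(val) for val in img_shape]
print(img_shape)  # [320, 416]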
188,711 | import torch
from mmdet.models.detectors.base import ForwardResults
from mmdet.structures.det_data_sample import OptSampleList
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import is_dynamic_shape
from .single_stage import _set_metainfo
def __forward_impl_instance_seg(self,
batch_inputs,
data_samples,
rescale=True,
**kwargs):
"""Rewrite and adding mark for `forward`.
Encapsulate this function for rewriting `forward` of BaseDetector.
1. Add mark for BaseDetector.
2. Support both dynamic and static export to onnx.
"""
x = self.extract_feat(batch_inputs)
if self.with_bbox:
# the bbox branch does not need to be scaled to the original
# image scale, because the mask branch will scale both bbox
# and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.bbox_head.predict(
x, data_samples, rescale=bbox_rescale)
else:
results_list = None
mask_outs = self.mask_head.predict(
x, data_samples, rescale=rescale, results_list=results_list)
return mask_outs
@FUNCTION_REWRITER.register_rewriter(
    'mmdet.models.detectors.single_stage_instance_seg.'
    'SingleStageInstanceSegmentor.forward')
"""Rewrite and adding mark for `forward`.
Encapsulate this function for rewriting `forward` of BaseDetector.
1. Add mark for BaseDetector.
2. Support both dynamic and static export to onnx.
"""
The provided code snippet includes necessary dependencies for implementing the `single_stage_instance_segmentor__forward` function. Write a Python function `def single_stage_instance_segmentor__forward( self, batch_inputs: torch.Tensor, data_samples: OptSampleList = None, mode: str = 'tensor', **kwargs) -> ForwardResults` to solve the following problem:
Rewrite `forward` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool): Whether to rescale the results. Defaults to True. Returns: tuple[Tensor]: Detection results of the input images. - dets (Tensor): Classification bboxes and scores. Has a shape (num_instances, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
Here is the function:
def single_stage_instance_segmentor__forward(
self,
batch_inputs: torch.Tensor,
data_samples: OptSampleList = None,
mode: str = 'tensor',
**kwargs) -> ForwardResults:
"""Rewrite `forward` for default backend.
Support configured dynamic/static shape for model input and return
detection result as Tensor instead of numpy array.
Args:
batch_inputs (Tensor): Inputs with shape (N, C, H, W).
data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results.
Defaults to True.
Returns:
tuple[Tensor]: Detection results of the
input images.
- dets (Tensor): Classification bboxes and scores.
Has a shape (num_instances, 5)
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
# get origin input shape as tensor to support onnx dynamic shape
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
img_shape = torch._shape_as_tensor(batch_inputs)[2:]
if not is_dynamic_flag:
img_shape = [int(val) for val in img_shape]
# set the metainfo
data_samples = _set_metainfo(data_samples, img_shape)
return __forward_impl_instance_seg(
self, batch_inputs, data_samples=data_samples, **kwargs) | Rewrite `forward` for default backend. Support configured dynamic/static shape for model input and return detection result as Tensor instead of numpy array. Args: batch_inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool): Whether to rescale the results. Defaults to True. Returns: tuple[Tensor]: Detection results of the input images. - dets (Tensor): Classification bboxes and scores. Has a shape (num_instances, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). |
188,712 | import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `get_topk_from_heatmap__default` function. Write a Python function `def get_topk_from_heatmap__default(scores, k=20)` to solve the following problem:
Get top k positions from heatmap. Replace view(batch, -1) with flatten
Here is the function:
def get_topk_from_heatmap__default(scores, k=20):
"""Get top k positions from heatmap.
Replace view(batch, -1) with flatten
"""
height, width = scores.size()[2:]
topk_scores, topk_inds = torch.topk(scores.flatten(1), k)
topk_clses = topk_inds // (height * width)
topk_inds = topk_inds % (height * width)
topk_ys = topk_inds // width
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs | Get top k positions from heatmap. Replace view(batch, -1) with flatten |
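A quick self-contained check of the helper above (random heatmap values, shapes only):
import torch

scores = torch.rand(2, 3, 32, 32)  # (batch, num_classes, H, W) heatmap
topk_scores, topk_inds, topk_clses, topk_ys, topk_xs = \
    get_topk_from_heatmap__default(scores, k=20)
# every output is (batch, k); flatten(1) keeps the batch axis explicit,
# which exports more cleanly than view(batch, -1)
assert topk_scores.shape == (2, 20) and topk_xs.shape == (2, 20)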
188,713 | from copy import deepcopy
from typing import Dict, Optional, Sequence, Tuple, Union
import mmengine
import numpy as np
import torch
from mmdet3d.structures import get_box_type
from mmengine import Config
from mmengine.dataset import Compose, pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task
from .mmdet3d import MMDET3D_TASK
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_metainfo` function. Write a Python function `def _get_dataset_metainfo(model_cfg: Config)` to solve the following problem:
Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: list[str]: A list of string specifying names of different class.
Here is the function:
def _get_dataset_metainfo(model_cfg: Config):
"""Get metainfo of dataset.
Args:
model_cfg Config: Input model Config object.
Returns:
list[str]: A list of string specifying names of different class.
"""
for dataloader_name in [
'test_dataloader', 'val_dataloader', 'train_dataloader'
]:
if dataloader_name not in model_cfg:
continue
dataloader_cfg = model_cfg[dataloader_name]
dataset_cfg = dataloader_cfg.dataset
if 'metainfo' in dataset_cfg:
return dataset_cfg.metainfo
return None | Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: list[str]: A list of string specifying names of different class. |
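A small illustration of the lookup order (the config below is a made-up fragment, not a real mmdet3d config):
from mmengine import Config

model_cfg = Config(
    dict(
        val_dataloader=dict(dataset=dict(type='KittiDataset')),
        test_dataloader=dict(
            dataset=dict(
                type='KittiDataset',
                metainfo=dict(classes=['Pedestrian', 'Cyclist', 'Car'])))))

# test_dataloader is checked first, so its metainfo is returned
print(_get_dataset_metainfo(model_cfg)['classes'])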
188,714 | from typing import Any, Dict, List, Optional, Sequence, Union
import torch
from mmdet3d.structures.det3d_data_sample import SampleList
from mmengine import Config
from mmengine.model.base_model.data_preprocessor import BaseDataPreprocessor
from mmengine.registry import Registry
from mmengine.structures import BaseDataElement, InstanceData
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
load_config)
__BACKEND_MODEL = Registry('backend_mono_detectors')
The provided code snippet includes necessary dependencies for implementing the `build_mono_detection_model` function. Write a Python function `def build_mono_detection_model( model_files: Sequence[str], model_cfg: Union[str, Config], deploy_cfg: Union[str, Config], device: str, data_preprocessor: Optional[Union[Config, BaseDataPreprocessor]] = None, **kwargs)` to solve the following problem:
Build monocular 3d object detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: VoxelDetectionModel: Detector for a configured backend.
Here is the function:
def build_mono_detection_model(
model_files: Sequence[str],
model_cfg: Union[str, Config],
deploy_cfg: Union[str, Config],
device: str,
data_preprocessor: Optional[Union[Config,
BaseDataPreprocessor]] = None,
**kwargs):
"""Build monocular 3d object detection model for different backends.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | Config): Input model config file or Config
object.
deploy_cfg (str | Config): Input deployment config file or
Config object.
device (str): Device to input model
data_preprocessor (BaseDataPreprocessor | Config): The data
preprocessor of the model.
Returns:
VoxelDetectionModel: Detector for a configured backend.
"""
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
backend = get_backend(deploy_cfg)
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
backend_detector = __BACKEND_MODEL.build(
dict(
type=model_type,
backend=backend,
backend_files=model_files,
device=device,
model_cfg=model_cfg,
deploy_cfg=deploy_cfg,
data_preprocessor=data_preprocessor,
**kwargs))
return backend_detector | Build monocular 3d object detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: VoxelDetectionModel: Detector for a configured backend. |
188,715 | from typing import Any, Dict, List, Optional, Sequence, Union
import mmcv
import torch
from mmdet3d.structures.det3d_data_sample import SampleList
from mmengine import Config
from mmengine.model.base_model.data_preprocessor import BaseDataPreprocessor
from mmengine.registry import Registry
from mmengine.structures import BaseDataElement, InstanceData
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
load_config)
__BACKEND_MODEL = Registry('backend_voxel_detectors')
The provided code snippet includes necessary dependencies for implementing the `build_voxel_detection_model` function. Write a Python function `def build_voxel_detection_model( model_files: Sequence[str], model_cfg: Union[str, Config], deploy_cfg: Union[str, Config], device: str, data_preprocessor: Optional[Union[Config, BaseDataPreprocessor]] = None, **kwargs)` to solve the following problem:
Build 3d voxel object detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: VoxelDetectionModel: Detector for a configured backend.
Here is the function:
def build_voxel_detection_model(
model_files: Sequence[str],
model_cfg: Union[str, Config],
deploy_cfg: Union[str, Config],
device: str,
data_preprocessor: Optional[Union[Config,
BaseDataPreprocessor]] = None,
**kwargs):
"""Build 3d voxel object detection model for different backends.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | Config): Input model config file or Config
object.
deploy_cfg (str | Config): Input deployment config file or
Config object.
device (str): Device to input model
data_preprocessor (BaseDataPreprocessor | Config): The data
preprocessor of the model.
Returns:
VoxelDetectionModel: Detector for a configured backend.
"""
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
backend = get_backend(deploy_cfg)
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
backend_detector = __BACKEND_MODEL.build(
dict(
type=model_type,
backend=backend,
backend_files=model_files,
device=device,
model_cfg=model_cfg,
deploy_cfg=deploy_cfg,
data_preprocessor=data_preprocessor,
**kwargs))
return backend_detector | Build 3d voxel object detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: VoxelDetectionModel: Detector for a configured backend. |
188,716 | import os
from copy import deepcopy
from typing import Dict, Optional, Sequence, Tuple, Union
import mmengine
import numpy as np
import torch
from mmdet3d.structures import get_box_type
from mmengine import Config
from mmengine.dataset import Compose, pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task
from .mmdet3d import MMDET3D_TASK
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_metainfo` function. Write a Python function `def _get_dataset_metainfo(model_cfg: Config)` to solve the following problem:
Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: list[str]: A list of string specifying names of different class.
Here is the function:
def _get_dataset_metainfo(model_cfg: Config):
"""Get metainfo of dataset.
Args:
model_cfg Config: Input model Config object.
Returns:
list[str]: A list of string specifying names of different class.
"""
for dataloader_name in [
'test_dataloader', 'val_dataloader', 'train_dataloader'
]:
if dataloader_name not in model_cfg:
continue
dataloader_cfg = model_cfg[dataloader_name]
dataset_cfg = dataloader_cfg.dataset
if 'metainfo' in dataset_cfg:
return dataset_cfg.metainfo
return None | Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: list[str]: A list of string specifying names of different class. |
188,717 | from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `singlestagemono3ddetector__forward` function. Write a Python function `def singlestagemono3ddetector__forward(self, inputs: Tensor, **kwargs)` to solve the following problem:
Rewrite to support feed inputs of Tensor type. Args: inputs (Tensor): Input image Returns: list: two torch.Tensor
Here is the function:
def singlestagemono3ddetector__forward(self, inputs: Tensor, **kwargs):
"""Rewrite to support feed inputs of Tensor type.
Args:
inputs (Tensor): Input image
Returns:
list: two torch.Tensor
"""
x = self.extract_feat({'imgs': inputs})
results = self.bbox_head.forward(x)
return results[0], results[1] | Rewrite to support feed inputs of Tensor type. Args: inputs (Tensor): Input image Returns: list: two torch.Tensor |
188,718 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_ir_config
The provided code snippet includes necessary dependencies for implementing the `mvxtwostagedetector__extract_img_feat` function. Write a Python function `def mvxtwostagedetector__extract_img_feat(self, img: torch.Tensor) -> dict` to solve the following problem:
Extract features of images.
Here is the function:
def mvxtwostagedetector__extract_img_feat(self, img: torch.Tensor) -> dict:
"""Extract features of images."""
if self.with_img_backbone and img is not None:
if img.dim() == 5 and img.size(0) == 1:
img.squeeze_()
elif img.dim() == 5 and img.size(0) > 1:
B, N, C, H, W = img.size()
img = img.view(B * N, C, H, W)
img_feats = self.img_backbone(img)
else:
return None
if self.with_img_neck:
img_feats = self.img_neck(img_feats)
return img_feats | Extract features of images. |
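The 5-D handling at the top of the function simply folds the multi-view axis into the batch axis before the image backbone. In isolation, with made-up sizes:
import torch

img = torch.rand(2, 6, 3, 256, 704)  # (B, num_views, C, H, W)
if img.dim() == 5 and img.size(0) == 1:
    img.squeeze_()
elif img.dim() == 5 and img.size(0) > 1:
    B, N, C, H, W = img.size()
    img = img.view(B * N, C, H, W)  # -> (12, 3, 256, 704)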
188,719 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_ir_config
The provided code snippet includes necessary dependencies for implementing the `mvxtwostagedetector__extract_feat` function. Write a Python function `def mvxtwostagedetector__extract_feat(self, batch_inputs_dict: dict) -> tuple` to solve the following problem:
Rewrite this func to remove voxelize op. Args: batch_inputs_dict (dict): Input dict comprises `voxels`, `num_points` and `coors` Returns: tuple(torch.Tensor) : image feature and points feather.
Here is the function:
def mvxtwostagedetector__extract_feat(self, batch_inputs_dict: dict) -> tuple:
"""Rewrite this func to remove voxelize op.
Args:
batch_inputs_dict (dict): Input dict comprises `voxels`, `num_points`
and `coors`
Returns:
        tuple(torch.Tensor): image features and point features.
"""
voxel_dict = batch_inputs_dict.get('voxels', None)
imgs = batch_inputs_dict.get('imgs', None)
points = batch_inputs_dict.get('points', None)
img_feats = self.extract_img_feat(imgs)
pts_feats = self.extract_pts_feat(
voxel_dict, points=points, img_feats=img_feats)
return (img_feats, pts_feats) | Rewrite this func to remove voxelize op. Args: batch_inputs_dict (dict): Input dict comprises `voxels`, `num_points` and `coors` Returns: tuple(torch.Tensor) : image feature and points feather. |
188,720 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_ir_config
The provided code snippet includes necessary dependencies for implementing the `mvxtwostagedetector__forward` function. Write a Python function `def mvxtwostagedetector__forward(self, voxels: torch.Tensor, num_points: torch.Tensor, coors: torch.Tensor, **kwargs)` to solve the following problem:
Rewrite this func to remove voxelize op. Args: voxels (Tensor): input voxels num_points (Tensor): input num_points coors (Tensor): input coors Returns: tuple: A tuple of classification scores, bbox and direction classification prediction. - cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * C. - dir_cls_preds (list[Tensor|None]): Direction classification predictions for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 2.
Here is the function:
def mvxtwostagedetector__forward(self, voxels: torch.Tensor,
num_points: torch.Tensor, coors: torch.Tensor,
**kwargs):
"""Rewrite this func to remove voxelize op.
Args:
voxels (Tensor): input voxels
num_points (Tensor): input num_points
coors (Tensor): input coors
Returns:
tuple: A tuple of classification scores, bbox and direction
classification prediction.
- cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, the channels number
is num_base_priors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, the channels number
is num_base_priors * C.
- dir_cls_preds (list[Tensor|None]): Direction classification
predictions for all scale levels, each is a 4D-tensor,
the channels number is num_base_priors * 2.
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
batch_inputs_dict = {
'voxels': {
'voxels': voxels,
'num_points': num_points,
'coors': coors
}
}
_, pts_feats = self.extract_feat(batch_inputs_dict=batch_inputs_dict)
outs = self.pts_bbox_head(pts_feats)
if type(outs[0][0]) is dict:
bbox_preds, scores, dir_scores = [], [], []
for task_res in outs:
bbox_preds.append(task_res[0]['reg'])
bbox_preds.append(task_res[0]['height'])
bbox_preds.append(task_res[0]['dim'])
if 'vel' in task_res[0].keys():
bbox_preds.append(task_res[0]['vel'])
scores.append(task_res[0]['heatmap'])
dir_scores.append(task_res[0]['rot'])
bbox_preds = torch.cat(bbox_preds, dim=1)
scores = torch.cat(scores, dim=1)
dir_scores = torch.cat(dir_scores, dim=1)
return scores, bbox_preds, dir_scores
else:
preds = []
expect_names = []
for i in range(len(outs[0])):
preds += [outs[0][i], outs[1][i], outs[2][i]]
expect_names += [
f'cls_score{i}', f'bbox_pred{i}', f'dir_cls_pred{i}'
]
# check if output_names is set correctly.
onnx_cfg = get_ir_config(deploy_cfg)
output_names = onnx_cfg['output_names']
if output_names != list(expect_names):
raise RuntimeError(f'`output_names` should be {expect_names} '
f'but given {output_names}\n'
f'Deploy config:\n{deploy_cfg.pretty_text}')
return tuple(preds) | Rewrite this func to remove voxelize op. Args: voxels (Tensor): input voxels num_points (Tensor): input num_points coors (Tensor): input coors Returns: tuple: A tuple of classification scores, bbox and direction classification prediction. - cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * C. - dir_cls_preds (list[Tensor|None]): Direction classification predictions for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 2. |
188,721 | import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `pointpillarsscatter__forward` function. Write a Python function `def pointpillarsscatter__forward(self, voxel_features, coors, batch_size=1)` to solve the following problem:
Scatter features of single sample. Args: voxel_features (torch.Tensor): Voxel features from voxel encoder layer. coors (torch.Tensor): Coordinates of each voxel. The first column indicates the sample ID. batch_size (int): Number of samples in the current batch, batch_size=1 by default.
Here is the function:
def pointpillarsscatter__forward(self, voxel_features, coors, batch_size=1):
"""Scatter features of single sample.
Args:
voxel_features (torch.Tensor): Voxel features from voxel encoder layer.
coors (torch.Tensor): Coordinates of each voxel.
The first column indicates the sample ID.
batch_size (int): Number of samples in the current batch, batch_size=1
by default.
"""
canvas = torch.zeros(
self.in_channels,
self.nx * self.ny,
dtype=voxel_features.dtype,
device=voxel_features.device)
indices = coors[:, 2] * self.nx + coors[:, 3]
indices = indices.long()
voxels = voxel_features.t()
# Now scatter the blob back to the canvas.
canvas.scatter_(
dim=1, index=indices.expand(canvas.shape[0], -1), src=voxels)
# Undo the column stacking to final 4-dim tensor
canvas = canvas.view(1, self.in_channels, self.ny, self.nx)
return canvas | Scatter features of single sample. Args: voxel_features (torch.Tensor): Voxel features from voxel encoder layer. coors (torch.Tensor): Coordinates of each voxel. The first column indicates the sample ID. batch_size (int): Number of samples in the current batch, batch_size=1 by default. |
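The core of this rewrite is a single scatter_ onto a flattened canvas. A standalone sketch of that indexing trick with made-up sizes (plain tensors, not the actual PointPillarsScatter module):
import torch

in_channels, ny, nx = 4, 8, 8
voxel_features = torch.rand(10, in_channels)  # 10 non-empty pillars
coors = torch.zeros(10, 4, dtype=torch.long)  # columns: (batch, z, y, x)
coors[:, 2] = torch.randint(0, ny, (10,))
coors[:, 3] = torch.randint(0, nx, (10,))

canvas = torch.zeros(in_channels, nx * ny)
indices = (coors[:, 2] * nx + coors[:, 3]).long()
canvas.scatter_(
    dim=1, index=indices.expand(in_channels, -1), src=voxel_features.t())
canvas = canvas.view(1, in_channels, ny, nx)  # dense BEV pseudo-image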
188,722 | from typing import List, Tuple
import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `basedetector__forward` function. Write a Python function `def basedetector__forward(self, voxels: torch.Tensor, num_points: torch.Tensor, coors: torch.Tensor, data_samples=None, **kwargs) -> Tuple[List[torch.Tensor]]` to solve the following problem:
Extract features of images.
Here is the function:
def basedetector__forward(self,
voxels: torch.Tensor,
num_points: torch.Tensor,
coors: torch.Tensor,
data_samples=None,
**kwargs) -> Tuple[List[torch.Tensor]]:
"""Extract features of images."""
batch_inputs_dict = {
'voxels': {
'voxels': voxels,
'num_points': num_points,
'coors': coors
}
}
return self._forward(batch_inputs_dict, data_samples, **kwargs) | Extract features of images. |
188,723 | import torch
from mmdet3d.models.voxel_encoders.utils import get_paddings_indicator
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `pillar_encoder__forward` function. Write a Python function `def pillar_encoder__forward(self, features, num_points, coors, *args, **kwargs)` to solve the following problem:
Rewrite this func to optimize node. Modify the code at _with_voxel_center and use slice instead of the original operation. Args: features (torch.Tensor): Point features or raw points in shape (N, M, C). num_points (torch.Tensor): Number of points in each pillar. coors (torch.Tensor): Coordinates of each voxel. Returns: torch.Tensor: Features of pillars.
Here is the function:
def pillar_encoder__forward(self, features, num_points, coors, *args,
**kwargs):
"""Rewrite this func to optimize node. Modify the code at
_with_voxel_center and use slice instead of the original operation.
Args:
features (torch.Tensor): Point features or raw points in shape
(N, M, C).
num_points (torch.Tensor): Number of points in each pillar.
coors (torch.Tensor): Coordinates of each voxel.
Returns:
torch.Tensor: Features of pillars.
"""
features_ls = [features]
# Find distance of x, y, and z from cluster center
if self._with_cluster_center:
points_mean = features[:, :, :3].sum(
dim=1, keepdim=True) / num_points.type_as(features).view(-1, 1, 1)
f_cluster = features[:, :, :3] - points_mean
features_ls.append(f_cluster)
# Find distance of x, y, and z from pillar center
device = features.device
if self._with_voxel_center:
if not self.legacy:
f_center = features[..., :3] - (coors[..., 1:] * torch.tensor(
[self.vz, self.vy, self.vx]).to(device) + torch.tensor([
self.z_offset, self.y_offset, self.x_offset
]).to(device)).unsqueeze(1).flip(2)
else:
f_center = features[..., :3] - (coors[..., 1:] * torch.tensor(
[self.vz, self.vy, self.vx]).to(device) + torch.tensor([
self.z_offset, self.y_offset, self.x_offset
]).to(device)).unsqueeze(1).flip(2)
features_ls[0] = torch.cat((f_center, features[..., 3:]), dim=-1)
features_ls.append(f_center)
if self._with_distance:
points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True)
features_ls.append(points_dist)
# Combine together feature decorations
features = torch.cat(features_ls, dim=-1)
# The feature decorations were calculated without regard to whether
# pillar was empty. Need to ensure that
# empty pillars remain set to zeros.
voxel_count = features.shape[1]
mask = get_paddings_indicator(num_points, voxel_count, axis=0)
mask = torch.unsqueeze(mask, -1).type_as(features)
features *= mask
for pfn in self.pfn_layers:
features = pfn(features, num_points)
return features.squeeze(1) | Rewrite this func to optimize node. Modify the code at _with_voxel_center and use slice instead of the original operation. Args: features (torch.Tensor): Point features or raw points in shape (N, M, C). num_points (torch.Tensor): Number of points in each pillar. coors (torch.Tensor): Coordinates of each voxel. Returns: torch.Tensor: Features of pillars. |
188,724 | from typing import List, Optional, Sequence, Union
import mmengine
import torch
from mmagic.structures import DataSample
from mmengine import Config
from mmengine.model.base_model.data_preprocessor import BaseDataPreprocessor
from mmengine.registry import Registry
from mmengine.structures import BaseDataElement
from torch import nn
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
get_root_logger, load_config)
__BACKEND_MODEL = Registry('backend_models')
The provided code snippet includes necessary dependencies for implementing the `build_super_resolution_model` function. Write a Python function `def build_super_resolution_model( model_files: Sequence[str], model_cfg: Union[str, mmengine.Config], deploy_cfg: Union[str, mmengine.Config], device: str, data_preprocessor: Optional[Union[Config, BaseDataPreprocessor]] = None, **kwargs)` to solve the following problem:
Build super resolution model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: End2EndModel: Super Resolution model for a configured backend.
Here is the function:
def build_super_resolution_model(
model_files: Sequence[str],
model_cfg: Union[str, mmengine.Config],
deploy_cfg: Union[str, mmengine.Config],
device: str,
data_preprocessor: Optional[Union[Config,
BaseDataPreprocessor]] = None,
**kwargs):
"""Build super resolution model for different backends.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | Config): Input model config file or Config
object.
deploy_cfg (str | Config): Input deployment config file or
Config object.
device (str): Device to input model
data_preprocessor (BaseDataPreprocessor | Config): The data
preprocessor of the model.
Returns:
End2EndModel: Super Resolution model for a configured backend.
"""
model_cfg = load_config(model_cfg)[0]
deploy_cfg = load_config(deploy_cfg)[0]
backend = get_backend(deploy_cfg)
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
backend_model = __BACKEND_MODEL.build(
dict(
type=model_type,
backend=backend,
backend_files=model_files,
device=device,
model_cfg=model_cfg,
deploy_cfg=deploy_cfg,
data_preprocessor=data_preprocessor,
**kwargs))
return backend_model | Build super resolution model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: End2EndModel: Super Resolution model for a configured backend. |
188,725 | from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import mmengine
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmdeploy.codebase.base import BaseTask
from mmdeploy.codebase.mmagic.deploy.mmagic import MMAGIC_TASK
from mmdeploy.utils import Task, get_input_shape
The provided code snippet includes necessary dependencies for implementing the `process_model_config` function. Write a Python function `def process_model_config(model_cfg: mmengine.Config, imgs: Union[Sequence[str], Sequence[np.ndarray]], input_shape: Optional[Sequence[int]] = None)` to solve the following problem:
Process the model config. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data type are List[str], List[np.ndarray]. input_shape (list[int]): A list of two integer in (width, height) format specifying input shape. Default: None. Returns: mmengine.Config: the model config after processing.
Here is the function:
def process_model_config(model_cfg: mmengine.Config,
imgs: Union[Sequence[str], Sequence[np.ndarray]],
input_shape: Optional[Sequence[int]] = None):
"""Process the model config.
Args:
model_cfg (mmengine.Config): The model config.
imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted
data type are List[str], List[np.ndarray].
input_shape (list[int]): A list of two integer in (width, height)
format specifying input shape. Default: None.
Returns:
mmengine.Config: the model config after processing.
"""
config = deepcopy(model_cfg)
if not hasattr(config, 'test_pipeline'):
config.__setattr__('test_pipeline', config.val_pipeline)
keys_to_remove = ['gt', 'gt_path']
# MMagic doesn't support LoadImageFromWebcam.
# Remove "LoadImageFromFile" and related metakeys.
load_from_file = isinstance(imgs[0], str)
is_static_cfg = input_shape is not None
if not load_from_file:
config.test_pipeline.pop(0)
keys_to_remove.append('lq_path')
# Fix the input shape by 'Resize'
if is_static_cfg:
resize = {
'type': 'Resize',
'scale': (input_shape[0], input_shape[1]),
'keys': ['img']
}
config.test_pipeline.insert(1, resize)
for key in keys_to_remove:
for pipeline in list(config.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
config.test_pipeline.remove(pipeline)
if 'keys' in pipeline:
while key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
config.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline:
while key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
return config | Process the model config. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data type are List[str], List[np.ndarray]. input_shape (list[int]): A list of two integer in (width, height) format specifying input shape. Default: None. Returns: mmengine.Config: the model config after processing. |
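A worked example of what the processing does for in-memory inputs with a static shape (the pipeline below is a made-up fragment, not a real MMagic config):
import numpy as np
from mmengine import Config

model_cfg = Config(
    dict(test_pipeline=[
        dict(type='LoadImageFromFile', key='img'),
        dict(type='PackInputs', keys=['img', 'gt'], meta_keys=['gt_path']),
    ]))
cfg = process_model_config(
    model_cfg, [np.zeros((64, 64, 3))], input_shape=(128, 128))
# LoadImageFromFile is dropped because the input is an ndarray, a Resize to
# (128, 128) is inserted, and the 'gt'/'gt_path' keys are stripped
print(cfg.test_pipeline)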
188,726 | from copy import deepcopy
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import mmengine
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmdeploy.codebase.base import BaseTask
from mmdeploy.codebase.mmagic.deploy.mmagic import MMAGIC_TASK
from mmdeploy.utils import Task, get_input_shape
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_metainfo` function. Write a Python function `def _get_dataset_metainfo(model_cfg: Config)` to solve the following problem:
Get metainfo of dataset. Args: model_cfg (Config): Input model Config object. Returns: list[str]: A list of strings specifying names of different classes.
Here is the function:
def _get_dataset_metainfo(model_cfg: Config):
"""Get metainfo of dataset.
Args:
        model_cfg (Config): Input model Config object.
    Returns:
        list[str]: A list of strings specifying names of different classes.
"""
from mmagic import datasets # noqa
from mmagic.registry import DATASETS
module_dict = DATASETS.module_dict
for dataloader_name in [
'test_dataloader', 'val_dataloader', 'train_dataloader'
]:
if dataloader_name not in model_cfg:
continue
dataloader_cfg = model_cfg[dataloader_name]
if isinstance(dataloader_cfg, list):
dataset_cfg = [loader.dataset for loader in dataloader_cfg]
dataset_list = [
module_dict.get(dataset.type, None) for dataset in dataset_cfg
]
if len(dataset_list) == 0:
continue
meta_list = []
for i, dataset in enumerate(dataset_list):
if hasattr(dataset, '_load_metainfo') and \
isinstance(dataset._load_metainfo, Callable):
meta = dataset._load_metainfo(dataset_cfg[i].get(
'metainfo', None))
meta_list.append(meta)
if hasattr(dataset, 'METAINFO'):
meta_list.append(dataset.METAINFO)
return meta_list
else:
dataset_cfg = dataloader_cfg.get('dataset', None)
dataset_cls = module_dict.get(dataset_cfg.type, None)
if dataset_cls is None:
continue
if hasattr(dataset_cls, '_load_metainfo') and isinstance(
dataset_cls._load_metainfo, Callable):
meta = dataset_cls._load_metainfo(
dataset_cfg.get('metainfo', None))
if meta is not None:
return meta
if hasattr(dataset_cls, 'METAINFO'):
return dataset_cls.METAINFO
    return None | Get metainfo of dataset. Args: model_cfg (Config): Input model Config object. Returns: list[str]: A list of strings specifying names of different classes. |
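For illustration, a hedged sketch of probing `_get_dataset_metainfo` with an in-memory config; the dataset type name is an assumption and must be registered in mmagic for a non-None result.
# Hedged sketch: 'BasicImageDataset' is assumed to be a registered mmagic dataset type.
from mmengine import Config

model_cfg = Config(
    dict(test_dataloader=dict(
        dataset=dict(type='BasicImageDataset', metainfo=dict(task_name='sr_demo')))))
meta = _get_dataset_metainfo(model_cfg)  # resolved metainfo dict, or None if the type is unknown
print(meta)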
188,727 | from typing import List, Optional
from mmengine.structures import BaseDataElement
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `base_edit_model__forward` function. Write a Python function `def base_edit_model__forward( self, batch_inputs: Tensor, data_samples: Optional[List[BaseDataElement]] = None, mode: str = 'predict')` to solve the following problem:
Rewrite `forward` of BaseEditModel for default backend. Args: batch_inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[BaseDataElement], optional): The annotation data of every sample. It's required if ``mode="loss"``. Defaults to None. mode (str): Which kind of value to return. Defaults to 'predict'. Returns: A list of :obj:`mmengine.BaseDataElement`.
Here is the function:
def base_edit_model__forward(
self,
batch_inputs: Tensor,
data_samples: Optional[List[BaseDataElement]] = None,
mode: str = 'predict'):
"""Rewrite `forward` of BaseEditModel for default backend.
Args:
batch_inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
        data_samples (List[BaseDataElement], optional): The annotation
            data of every sample. It's required if ``mode="loss"``.
            Defaults to None.
        mode (str): Which kind of value to return. Defaults to 'predict'.
    Returns:
        A list of :obj:`mmengine.BaseDataElement`.
"""
    return self.forward_tensor(batch_inputs, data_samples) | Rewrite `forward` of BaseEditModel for default backend. Args: batch_inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[BaseDataElement], optional): The annotation data of every sample. It's required if ``mode="loss"``. Defaults to None. mode (str): Which kind of value to return. Defaults to 'predict'. Returns: A list of :obj:`mmengine.BaseDataElement`. |
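The function above is written as an mmdeploy rewrite; a hedged sketch of how such a rewrite is typically attached to the original method via `FUNCTION_REWRITER` follows (the dotted `func_name` path is an assumption, not taken from this record).
# Hedged sketch of the usual mmdeploy registration pattern; the func_name path is assumed.
from typing import List, Optional

from mmengine.structures import BaseDataElement
from torch import Tensor

from mmdeploy.core import FUNCTION_REWRITER

@FUNCTION_REWRITER.register_rewriter(
    func_name='mmagic.models.base_models.BaseEditModel.forward')  # assumed dotted path
def base_edit_model__forward(self,
                             batch_inputs: Tensor,
                             data_samples: Optional[List[BaseDataElement]] = None,
                             mode: str = 'predict'):
    """During export, bypass pre/post-processing and call the tensor branch directly."""
    return self.forward_tensor(batch_inputs, data_samples)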
188,728 | from copy import deepcopy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import mmengine
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.dist import cast_data_device
from mmengine.model import BaseDataPreprocessor
from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task, get_input_shape
from .mmocr import MMOCR_TASK
The provided code snippet includes necessary dependencies for implementing the `process_model_config` function. Write a Python function `def process_model_config(model_cfg: mmengine.Config, imgs: Union[Sequence[str], Sequence[np.ndarray]], input_shape: Optional[Sequence[int]] = None)` to solve the following problem:
Process the model config. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data types are List[str] and List[np.ndarray]. input_shape (list[int]): A list of two integers in (width, height) format specifying the input shape. Default: None. Returns: mmengine.Config: the model config after processing.
Here is the function:
def process_model_config(model_cfg: mmengine.Config,
imgs: Union[Sequence[str], Sequence[np.ndarray]],
input_shape: Optional[Sequence[int]] = None):
"""Process the model config.
Args:
model_cfg (mmengine.Config): The model config.
        imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted
            data types are List[str] and List[np.ndarray].
        input_shape (list[int]): A list of two integers in (width, height)
            format specifying the input shape. Default: None.
Returns:
mmengine.Config: the model config after processing.
"""
if isinstance(imgs[0], np.ndarray):
# set loading pipeline type
model_cfg.test_pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = model_cfg._cfg_dict.test_pipeline
for i, transform in enumerate(test_pipeline):
if transform.type == 'PackTextRecogInputs':
test_pipeline[i].meta_keys = tuple(
j for j in test_pipeline[i].meta_keys if j != 'instances')
# for static exporting
if input_shape is not None and transform.type == 'RescaleToHeight':
resize = {
'height': input_shape[1],
'min_width': input_shape[0],
'max_width': input_shape[0]
}
test_pipeline[i].update(resize)
test_pipeline = [
transform for transform in test_pipeline
if transform.type != 'LoadOCRAnnotations'
]
model_cfg.test_pipeline = test_pipeline
    return model_cfg | Process the model config. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data types are List[str] and List[np.ndarray]. input_shape (list[int]): A list of two integers in (width, height) format specifying the input shape. Default: None. Returns: mmengine.Config: the model config after processing. |
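A hedged sketch of calling the function above with an in-memory image, so the loader switches to `LoadImageFromNDArray` and `RescaleToHeight` is pinned for static export; the config path is a hypothetical placeholder.
# Hedged sketch: the CRNN config path is a hypothetical placeholder; mmocr must be installed.
import numpy as np
from mmengine import Config

model_cfg = Config.fromfile('configs/textrecog/crnn/crnn_mini-vgg_5e_mj.py')  # assumed config
img = np.zeros((32, 100, 3), dtype=np.uint8)  # ndarray input switches the loader type
cfg = process_model_config(model_cfg, [img], input_shape=(100, 32))
# 'RescaleToHeight' now uses height=32 and min_width=max_width=100; 'LoadOCRAnnotations' is dropped.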
188,729 | from copy import deepcopy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import mmengine
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.dist import cast_data_device
from mmengine.model import BaseDataPreprocessor
from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task, get_input_shape
from .mmocr import MMOCR_TASK
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_metainfo` function. Write a Python function `def _get_dataset_metainfo(model_cfg: Config)` to solve the following problem:
Get metainfo of dataset. Args: model_cfg (Config): Input model Config object. Returns: list[str]: A list of strings specifying names of different classes.
Here is the function:
def _get_dataset_metainfo(model_cfg: Config):
"""Get metainfo of dataset.
Args:
        model_cfg (Config): Input model Config object.
    Returns:
        list[str]: A list of strings specifying names of different classes.
"""
from mmocr import datasets # noqa
from mmocr.registry import DATASETS
module_dict = DATASETS.module_dict
for dataloader_name in [
'test_dataloader', 'val_dataloader', 'train_dataloader'
]:
if dataloader_name not in model_cfg:
continue
dataloader_cfg = model_cfg[dataloader_name]
if isinstance(dataloader_cfg, list):
dataloader_cfg = dataloader_cfg[0]
dataset_cfg = dataloader_cfg.dataset
dataset_cls = module_dict.get(dataset_cfg.type, None)
if dataset_cls is None:
continue
if hasattr(dataset_cls, '_load_metainfo') and isinstance(
dataset_cls._load_metainfo, Callable):
meta = dataset_cls._load_metainfo(
dataset_cfg.get('metainfo', None))
if meta is not None:
return meta
if hasattr(dataset_cls, 'METAINFO'):
return dataset_cls.METAINFO
    return None | Get metainfo of dataset. Args: model_cfg (Config): Input model Config object. Returns: list[str]: A list of strings specifying names of different classes. |
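As a hedged sketch, the list-valued dataloader branch above can be exercised with a toy config; the dataset type and annotation file name are assumptions for illustration.
# Hedged sketch: 'OCRDataset' and the annotation file name are assumptions.
from mmengine import Config

model_cfg = Config(
    dict(test_dataloader=[  # a list of dataloaders is accepted; only the first is inspected
        dict(dataset=dict(type='OCRDataset', ann_file='demo_ann.json'))
    ]))
print(_get_dataset_metainfo(model_cfg))  # metainfo dict if 'OCRDataset' is registered, else None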
188,730 | from typing import List, Optional, Sequence, Union
import cv2
import mmengine
import torch
from mmengine.registry import Registry
from mmengine.structures import BaseDataElement, InstanceData
from mmocr.structures import TextDetDataSample
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
load_config)
__BACKEND_MODEL = Registry('backend_text_detectors')
The provided code snippet includes necessary dependencies for implementing the `build_text_detection_model` function. Write a Python function `def build_text_detection_model(model_files: Sequence[str], model_cfg: Union[str, mmengine.Config], deploy_cfg: Union[str, mmengine.Config], device: str, **kwargs)` to solve the following problem:
Build text detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | mmengine.Config): Input model config file or Config object. deploy_cfg (str | mmengine.Config): Input deployment config file or Config object. device (str): Device to put the model on. Returns: BaseBackendModel: Text detector for a configured backend.
Here is the function:
def build_text_detection_model(model_files: Sequence[str],
model_cfg: Union[str, mmengine.Config],
deploy_cfg: Union[str, mmengine.Config],
device: str, **kwargs):
"""Build text detection model for different backends.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | mmengine.Config): Input model config file or Config
object.
deploy_cfg (str | mmengine.Config): Input deployment config file or
Config object.
        device (str): Device to put the model on.
Returns:
BaseBackendModel: Text detector for a configured backend.
"""
# load cfg if necessary
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
backend = get_backend(deploy_cfg)
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
backend_text_detector = __BACKEND_MODEL.build(
dict(
type=model_type,
backend=backend,
backend_files=model_files,
device=device,
deploy_cfg=deploy_cfg,
model_cfg=model_cfg,
**kwargs))
backend_text_detector = backend_text_detector.to(device)
    return backend_text_detector | Build text detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | mmengine.Config): Input model config file or Config object. deploy_cfg (str | mmengine.Config): Input deployment config file or Config object. device (str): Device to put the model on. Returns: BaseBackendModel: Text detector for a configured backend. |
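For illustration, a hedged sketch of building a backend text detector with the helper above; every file path is a hypothetical placeholder, and the backend named in the deploy config must actually be installed.
# Hedged sketch: all paths are hypothetical placeholders.
detector = build_text_detection_model(
    model_files=['end2end.onnx'],  # backend file(s) produced by a prior conversion step
    model_cfg='configs/textdet/dbnet/dbnet_resnet18_fpnc_1200e_icdar2015.py',  # assumed mmocr config
    deploy_cfg='configs/mmocr/text-detection/text-detection_onnxruntime_dynamic.py',  # assumed deploy config
    device='cpu')
detector.eval()  # the returned wrapper is a torch.nn.Module, so the usual module API applies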
188,731 | from typing import Optional, Sequence, Union
import mmengine
import torch
from mmengine.registry import Registry
from mmengine.structures import LabelData
from mmocr.utils.typing_utils import RecSampleList
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
load_config)
__BACKEND_MODEL = Registry('backend_text_recognizer')
The provided code snippet includes necessary dependencies for implementing the `build_text_recognition_model` function. Write a Python function `def build_text_recognition_model(model_files: Sequence[str], model_cfg: Union[str, mmengine.Config], deploy_cfg: Union[str, mmengine.Config], device: str, **kwargs)` to solve the following problem:
Build text recognition model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | mmengine.Config): Input model config file or Config object. deploy_cfg (str | mmengine.Config): Input deployment config file or Config object. device (str): Device to put the model on. Returns: BaseBackendModel: Text recognizer for a configured backend.
Here is the function:
def build_text_recognition_model(model_files: Sequence[str],
model_cfg: Union[str, mmengine.Config],
deploy_cfg: Union[str, mmengine.Config],
device: str, **kwargs):
"""Build text recognition model for different backends.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | mmengine.Config): Input model config file or Config
object.
deploy_cfg (str | mmengine.Config): Input deployment config file or
Config object.
        device (str): Device to put the model on.
Returns:
BaseBackendModel: Text recognizer for a configured backend.
"""
# load cfg if necessary
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
backend = get_backend(deploy_cfg)
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
backend_text_recognizer = __BACKEND_MODEL.build(
dict(
type=model_type,
backend=backend,
backend_files=model_files,
device=device,
deploy_cfg=deploy_cfg,
model_cfg=model_cfg,
**kwargs))
backend_text_recognizer = backend_text_recognizer.to(device)
    return backend_text_recognizer | Build text recognition model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | mmengine.Config): Input model config file or Config object. deploy_cfg (str | mmengine.Config): Input deployment config file or Config object. device (str): Device to put the model on. Returns: BaseBackendModel: Text recognizer for a configured backend. |
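The recognizer helper mirrors the detector one; the hedged sketch below only shows how the `model_type` key in the deploy config selects which registered wrapper is built (config file names are hypothetical placeholders).
# Hedged sketch: config file names are hypothetical placeholders.
from mmdeploy.utils import get_codebase_config, load_config

deploy_cfg, model_cfg = load_config('recog_deploy_cfg.py', 'crnn_model_cfg.py')
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
print(model_type)  # 'end2end' unless the deploy config overrides it, e.g. for an SDK wrapper
recognizer = build_text_recognition_model(
    ['end2end.onnx'], model_cfg, deploy_cfg, device='cpu')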
188,732 | from copy import deepcopy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import mmengine
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.dist import cast_data_device
from mmengine.model import BaseDataPreprocessor
from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task, get_input_shape
from .mmocr import MMOCR_TASK
The provided code snippet includes necessary dependencies for implementing the `process_model_config` function. Write a Python function `def process_model_config(model_cfg: mmengine.Config, imgs: Union[Sequence[str], Sequence[np.ndarray]], input_shape: Optional[Sequence[int]] = None)` to solve the following problem:
Process the model config. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data types are List[str] and List[np.ndarray]. input_shape (list[int]): A list of two integers in (width, height) format specifying the input shape. Default: None. Returns: mmengine.Config: the model config after processing.
Here is the function:
def process_model_config(model_cfg: mmengine.Config,
imgs: Union[Sequence[str], Sequence[np.ndarray]],
input_shape: Optional[Sequence[int]] = None):
"""Process the model config.
Args:
model_cfg (mmengine.Config): The model config.
        imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted
            data types are List[str] and List[np.ndarray].
        input_shape (list[int]): A list of two integers in (width, height)
            format specifying the input shape. Default: None.
Returns:
mmengine.Config: the model config after processing.
"""
pipeline = model_cfg.test_dataloader.dataset.pipeline
if isinstance(imgs[0], np.ndarray):
# set loading pipeline type
pipeline[0].type = 'LoadImageFromNDArray'
for i, transform in enumerate(pipeline):
if transform.type == 'PackTextDetInputs':
pipeline[i].meta_keys = tuple(j for j in pipeline[i].meta_keys
if j != 'instances')
# for static exporting
if input_shape is not None:
if transform.type in ('Resize', 'ShortScaleAspectJitter'):
pipeline[i] = mmengine.ConfigDict(
dict(type='Resize', scale=input_shape, keep_ratio=False))
pipeline = [
transform for transform in pipeline
if transform.type != 'LoadOCRAnnotations'
]
model_cfg.test_dataloader.dataset.pipeline = pipeline
    return model_cfg | Process the model config. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data types are List[str] and List[np.ndarray]. input_shape (list[int]): A list of two integers in (width, height) format specifying the input shape. Default: None. Returns: mmengine.Config: the model config after processing. |
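A hedged sketch of the text-detection variant above, showing the static-shape override; the config path and demo image are hypothetical placeholders.
# Hedged sketch: the DBNet config path and demo image are hypothetical placeholders.
from mmengine import Config

model_cfg = Config.fromfile('configs/textdet/dbnet/dbnet_resnet18_fpnc_1200e_icdar2015.py')  # assumed
cfg = process_model_config(model_cfg, ['demo/demo_text_det.jpg'], input_shape=(1333, 736))
pipeline = cfg.test_dataloader.dataset.pipeline
# 'Resize'/'ShortScaleAspectJitter' steps are replaced by Resize(scale=(1333, 736), keep_ratio=False)
# and 'LoadOCRAnnotations' is removed from the pipeline.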
188,733 | from copy import deepcopy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import mmengine
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.dist import cast_data_device
from mmengine.model import BaseDataPreprocessor
from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task, get_input_shape
from .mmocr import MMOCR_TASK
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_metainfo` function. Write a Python function `def _get_dataset_metainfo(model_cfg: Config)` to solve the following problem:
Get metainfo of dataset. Args: model_cfg (Config): Input model Config object. Returns: list[str]: A list of strings specifying names of different classes.
Here is the function:
def _get_dataset_metainfo(model_cfg: Config):
"""Get metainfo of dataset.
Args:
        model_cfg (Config): Input model Config object.
    Returns:
        list[str]: A list of strings specifying names of different classes.
"""
from mmocr import datasets # noqa
from mmocr.registry import DATASETS
module_dict = DATASETS.module_dict
for dataloader_name in [
'test_dataloader', 'val_dataloader', 'train_dataloader'
]:
if dataloader_name not in model_cfg:
continue
dataloader_cfg = model_cfg[dataloader_name]
dataset_cfg = dataloader_cfg.dataset
dataset_cls = module_dict.get(dataset_cfg.type, None)
if dataset_cls is None:
continue
if hasattr(dataset_cls, '_load_metainfo') and isinstance(
dataset_cls._load_metainfo, Callable):
meta = dataset_cls._load_metainfo(
dataset_cfg.get('metainfo', None))
if meta is not None:
return meta
if hasattr(dataset_cls, 'METAINFO'):
return dataset_cls.METAINFO
    return None | Get metainfo of dataset. Args: model_cfg (Config): Input model Config object. Returns: list[str]: A list of strings specifying names of different classes. |