id | prompt | docstring
---|---|---|
188,533 | import os
from pkg_resources import parse_version
from setuptools import find_packages, setup
The provided code snippet includes necessary dependencies for implementing the `parse_requirements` function. Write a Python function `def parse_requirements(fname='requirements.txt', with_version=True)` to solve the following problem:
Parse the package dependencies listed in a file but strips specific versioning information. Args: fname (str): path to the file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())"
Here is the function:
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a file but strips specific
versioning information.
Args:
fname (str): path to the file
with_version (bool, default=True): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently platform_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages | Parse the package dependencies listed in a file but strips specific versioning information. Args: fname (str): path to the file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())" |
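A minimal usage sketch for the parser above, assuming a hypothetical requirements.txt next to setup.py; the file name and its two entries are illustrative only:
# requirements.txt (hypothetical contents):
#   numpy>=1.20
#   onnx==1.13.0 ; platform_system != "Windows"
install_requires = parse_requirements('requirements.txt', with_version=True)
# -> ['numpy>=1.20', 'onnx==1.13.0;platform_system != "Windows"']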
188,534 | import os
from pkg_resources import parse_version
from setuptools import find_packages, setup
EXT_TYPE = ''
try:
import torch
from torch.utils.cpp_extension import BuildExtension
cmd_class = {'build_ext': BuildExtension}
EXT_TYPE = 'torch'
except ModuleNotFoundError:
cmd_class = {}
print('Skip building ext ops due to the absence of torch.')
def get_extensions():
extensions = []
ext_name = 'mmdeploy.backend.torchscript.ts_optimizer'
if EXT_TYPE == 'torch':
import glob
import platform
from torch.utils.cpp_extension import CppExtension
try:
import psutil
num_cpu = len(psutil.Process().cpu_affinity())
cpu_use = max(4, num_cpu - 1)
except (ModuleNotFoundError, AttributeError):
cpu_use = 4
os.environ.setdefault('MAX_JOBS', str(cpu_use))
define_macros = []
# Before PyTorch1.8.0, when compiling CUDA code, `cxx` is a
# required key passed to PyTorch. Even if there is no flag passed
# to cxx, users also need to pass an empty list to PyTorch.
# Since PyTorch1.8.0, it has a default value so users do not need
# to pass an empty list anymore.
# More details at https://github.com/pytorch/pytorch/pull/45956
extra_compile_args = {'cxx': []}
# c++14 is required.
# However, in the windows environment, some standard libraries
# will depend on c++17 or higher. In fact, for the windows
# environment, the compiler will choose the appropriate compiler
# to compile those cpp files, so there is no need to add the
# argument
if platform.system() != 'Windows':
if parse_version(torch.__version__) <= parse_version('1.12.1'):
extra_compile_args['cxx'] = ['-std=c++14']
else:
extra_compile_args['cxx'] = ['-std=c++17']
include_dirs = []
op_files = glob.glob(
'./csrc/mmdeploy/backend_ops/torchscript/optimizer/*.cpp'
) + glob.glob(
'./csrc/mmdeploy/backend_ops/torchscript/optimizer/ir/*.cpp'
) + glob.glob(
'./csrc/mmdeploy/backend_ops/torchscript/optimizer/passes'
'/onnx/*.cpp')
extension = CppExtension
# c++14 is required.
# However, in the windows environment, some standard libraries
# will depend on c++17 or higher. In fact, for the windows
# environment, the compiler will choose the appropriate compiler
# to compile those cpp files, so there is no need to add the
# argument
if 'nvcc' in extra_compile_args and platform.system() != 'Windows':
if parse_version(torch.__version__) <= parse_version('1.12.1'):
extra_compile_args['nvcc'] += ['-std=c++14']
else:
extra_compile_args['nvcc'] += ['-std=c++17']
ext_ops = extension(
name=ext_name,
sources=op_files,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args)
extensions.append(ext_ops)
return extensions | null |
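A hedged sketch of how the extension builder above is typically wired into setuptools; the package name and other metadata are placeholders, not the project's real setup() call:
setup(
    name='example-package',          # placeholder metadata
    packages=find_packages(),
    ext_modules=get_extensions(),    # empty list when torch is unavailable
    cmdclass=cmd_class,              # BuildExtension when torch is available
)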
188,535 | import os
import subprocess
import sys
import pytorch_sphinx_theme
from m2r import MdInclude
from recommonmark.transform import AutoStructify
from sphinx.builders.html import StandaloneHTMLBuilder
def generate_doxygen_xml(app):
try:
folder = '../cppapi'
retcode = subprocess.call('cd %s; doxygen' % folder, shell=True)
if retcode < 0:
sys.stderr.write('doxygen terminated by signal %s' % (-retcode))
except Exception as e:
sys.stderr.write('doxygen execution failed: %s' % e)
def setup(app):
# Add hook for building doxygen xml when needed
app.connect('builder-inited', generate_doxygen_xml)
app.add_config_value('no_underscore_emphasis', False, 'env')
app.add_config_value('m2r_parse_relative_links', False, 'env')
app.add_config_value('m2r_anonymous_references', False, 'env')
app.add_config_value('m2r_disable_inline_math', False, 'env')
app.add_directive('mdinclude', MdInclude)
app.add_config_value('recommonmark_config', {
'auto_toc_tree_section': 'Contents',
'enable_eval_rst': True,
}, True)
app.add_transform(AutoStructify) | null |
188,537 | from typing import Tuple
version_info = parse_version_info(__version__)
The provided code snippet includes necessary dependencies for implementing the `parse_version_info` function. Write a Python function `def parse_version_info(version_str: str) -> Tuple` to solve the following problem:
Parse version from a string. Args: version_str (str): A string represents a version info. Returns: tuple: A sequence of integer and string represents version.
Here is the function:
def parse_version_info(version_str: str) -> Tuple:
"""Parse version from a string.
Args:
version_str (str): A string represents a version info.
Returns:
tuple: A sequence of integer and string represents version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info) | Parse version from a string. Args: version_str (str): A string represents a version info. Returns: tuple: A sequence of integer and string represents version. |
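For example, the parser above handles plain releases and release candidates as follows:
assert parse_version_info('1.3.0') == (1, 3, 0)
assert parse_version_info('1.3.0rc2') == (1, 3, 0, 'rc2')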
188,538 | from typing import Dict, Iterable, Optional, Union
import onnx
from .core import PIPELINE_MANAGER
The provided code snippet includes necessary dependencies for implementing the `extract_model` function. Write a Python function `def extract_model(model: Union[str, onnx.ModelProto], start_marker: Union[str, Iterable[str]], end_marker: Union[str, Iterable[str]], start_name_map: Optional[Dict[str, str]] = None, end_name_map: Optional[Dict[str, str]] = None, dynamic_axes: Optional[Dict[str, Dict[int, str]]] = None, save_file: Optional[str] = None) -> onnx.ModelProto` to solve the following problem:
Extract partition-model from an ONNX model. The partition-model is defined by the names of the input and output tensors exactly. Examples: >>> from mmdeploy.apis import extract_model >>> model = 'work_dir/fastrcnn.onnx' >>> start_marker = 'detector:input' >>> end_marker = ['extract_feat:output', 'multiclass_nms[0]:input'] >>> dynamic_axes = { 'input': { 0: 'batch', 2: 'height', 3: 'width' }, 'scores': { 0: 'batch', 1: 'num_boxes', }, 'boxes': { 0: 'batch', 1: 'num_boxes', } } >>> save_file = 'partition_model.onnx' >>> extract_model(model, start_marker, end_marker, \ dynamic_axes=dynamic_axes, \ save_file=save_file) Args: model (str | onnx.ModelProto): Input ONNX model to be extracted. start_marker (str | Sequence[str]): Start marker(s) to extract. end_marker (str | Sequence[str]): End marker(s) to extract. start_name_map (Dict[str, str]): A mapping of start names, defaults to `None`. end_name_map (Dict[str, str]): A mapping of end names, defaults to `None`. dynamic_axes (Dict[str, Dict[int, str]]): A dictionary to specify dynamic axes of input/output, defaults to `None`. save_file (str): A file to save the extracted model, defaults to `None`. Returns: onnx.ModelProto: The extracted model.
Here is the function:
def extract_model(model: Union[str, onnx.ModelProto],
start_marker: Union[str, Iterable[str]],
end_marker: Union[str, Iterable[str]],
start_name_map: Optional[Dict[str, str]] = None,
end_name_map: Optional[Dict[str, str]] = None,
dynamic_axes: Optional[Dict[str, Dict[int, str]]] = None,
save_file: Optional[str] = None) -> onnx.ModelProto:
"""Extract partition-model from an ONNX model.
The partition-model is defined by the names of the input and output tensors
exactly.
Examples:
>>> from mmdeploy.apis import extract_model
>>> model = 'work_dir/fastrcnn.onnx'
>>> start_marker = 'detector:input'
>>> end_marker = ['extract_feat:output', 'multiclass_nms[0]:input']
>>> dynamic_axes = {
'input': {
0: 'batch',
2: 'height',
3: 'width'
},
'scores': {
0: 'batch',
1: 'num_boxes',
},
'boxes': {
0: 'batch',
1: 'num_boxes',
}
}
>>> save_file = 'partition_model.onnx'
>>> extract_model(model, start_marker, end_marker, \
dynamic_axes=dynamic_axes, \
save_file=save_file)
Args:
model (str | onnx.ModelProto): Input ONNX model to be extracted.
start_marker (str | Sequence[str]): Start marker(s) to extract.
end_marker (str | Sequence[str]): End marker(s) to extract.
start_name_map (Dict[str, str]): A mapping of start names, defaults to
`None`.
end_name_map (Dict[str, str]): A mapping of end names, defaults to
`None`.
dynamic_axes (Dict[str, Dict[int, str]]): A dictionary to specify
dynamic axes of input/output, defaults to `None`.
save_file (str): A file to save the extracted model, defaults to
`None`.
Returns:
onnx.ModelProto: The extracted model.
"""
from .onnx import extract_partition
return extract_partition(model, start_marker, end_marker, start_name_map,
end_name_map, dynamic_axes, save_file) | Extract partition-model from an ONNX model. The partition-model is defined by the names of the input and output tensors exactly. Examples: >>> from mmdeploy.apis import extract_model >>> model = 'work_dir/fastrcnn.onnx' >>> start_marker = 'detector:input' >>> end_marker = ['extract_feat:output', 'multiclass_nms[0]:input'] >>> dynamic_axes = { 'input': { 0: 'batch', 2: 'height', 3: 'width' }, 'scores': { 0: 'batch', 1: 'num_boxes', }, 'boxes': { 0: 'batch', 1: 'num_boxes', } } >>> save_file = 'partition_model.onnx' >>> extract_model(model, start_marker, end_marker, \ dynamic_axes=dynamic_axes, \ save_file=save_file) Args: model (str | onnx.ModelProto): Input ONNX model to be extracted. start_marker (str | Sequence[str]): Start marker(s) to extract. end_marker (str | Sequence[str]): End marker(s) to extract. start_name_map (Dict[str, str]): A mapping of start names, defaults to `None`. end_name_map (Dict[str, str]): A mapping of end names, defaults to `None`. dynamic_axes (Dict[str, Dict[int, str]]): A dictionary to specify dynamic axes of input/output, defaults to `None`. save_file (str): A file to save the extracted model, defaults to `None`. Returns: onnx.ModelProto: The extracted model. |
188,539 | from copy import deepcopy
from typing import Optional, Union
from mmengine import Config
from .core import PIPELINE_MANAGER, no_mp
The provided code snippet includes necessary dependencies for implementing the `create_calib_input_data` function. Write a Python function `def create_calib_input_data(calib_file: str, deploy_cfg: Union[str, Config], model_cfg: Union[str, Config], model_checkpoint: Optional[str] = None, dataset_cfg: Optional[Union[str, Config]] = None, dataset_type: str = 'val', device: str = 'cpu') -> None` to solve the following problem:
Create dataset for post-training quantization. Args: calib_file (str): The output calibration data file. deploy_cfg (str | Config): Deployment config file or Config object. model_cfg (str | Config): Model config file or Config object. model_checkpoint (str): A checkpoint path of PyTorch model, defaults to `None`. dataset_cfg (Optional[Union[str, Config]], optional): Model config to provide calibration dataset. If none, use `model_cfg` as the dataset config. Defaults to None. dataset_type (str, optional): The dataset type. Defaults to 'val'. device (str, optional): Device to create dataset. Defaults to 'cpu'.
Here is the function:
def create_calib_input_data(calib_file: str,
deploy_cfg: Union[str, Config],
model_cfg: Union[str, Config],
model_checkpoint: Optional[str] = None,
dataset_cfg: Optional[Union[str, Config]] = None,
dataset_type: str = 'val',
device: str = 'cpu') -> None:
"""Create dataset for post-training quantization.
Args:
calib_file (str): The output calibration data file.
deploy_cfg (str | Config): Deployment config file or
Config object.
model_cfg (str | Config): Model config file or Config object.
model_checkpoint (str): A checkpoint path of PyTorch model,
defaults to `None`.
dataset_cfg (Optional[Union[str, Config]], optional): Model
config to provide calibration dataset. If none, use `model_cfg`
as the dataset config. Defaults to None.
dataset_type (str, optional): The dataset type. Defaults to 'val'.
device (str, optional): Device to create dataset. Defaults to 'cpu'.
"""
from mmdeploy.core import patch_model
from mmdeploy.utils import (IR, cfg_apply_marks, get_backend,
get_ir_config, load_config)
from .utils import create_calib_input_data as create_calib_input_data_impl
with no_mp():
if dataset_cfg is None:
dataset_cfg = model_cfg
# load cfg if necessary
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
if dataset_cfg is None:
dataset_cfg = model_cfg
# load dataset_cfg if necessary
dataset_cfg = load_config(dataset_cfg)[0]
calib_dataloader = deepcopy(dataset_cfg[f'{dataset_type}_dataloader'])
calib_dataloader['batch_size'] = 1
from mmdeploy.apis.utils import build_task_processor
task_processor = build_task_processor(model_cfg, deploy_cfg, device)
apply_marks = cfg_apply_marks(deploy_cfg)
model = task_processor.build_pytorch_model(model_checkpoint)
dataset = task_processor.build_dataset(calib_dataloader['dataset'])
calib_dataloader['dataset'] = dataset
dataloader = task_processor.build_dataloader(calib_dataloader)
# patch model
backend = get_backend(deploy_cfg).value
ir = IR.get(get_ir_config(deploy_cfg)['type'])
patched_model = patch_model(
model, cfg=deploy_cfg, backend=backend, ir=ir)
def get_tensor_func(input_data):
input_data = model.data_preprocessor(input_data)
return input_data['inputs']
create_calib_input_data_impl(
calib_file,
patched_model,
dataloader,
get_tensor_func=get_tensor_func,
inference_func=model.forward,
model_partition=apply_marks,
context_info=dict(cfg=deploy_cfg),
device=device) | Create dataset for post-training quantization. Args: calib_file (str): The output calibration data file. deploy_cfg (str | Config): Deployment config file or Config object. model_cfg (str | Config): Model config file or Config object. model_checkpoint (str): A checkpoint path of PyTorch model, defaults to `None`. dataset_cfg (Optional[Union[str, Config]], optional): Model config to provide calibration dataset. If none, use `model_cfg` as the dataset config. Defaults to None. dataset_type (str, optional): The dataset type. Defaults to 'val'. device (str, optional): Device to create dataset. Defaults to 'cpu'. |
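A hedged usage sketch for the calibration helper above; the config paths, checkpoint and output file are placeholders rather than files shipped with the project:
create_calib_input_data(
    calib_file='work_dir/calib_data.h5',  # placeholder output file
    deploy_cfg='configs/mmdet/detection/detection_tensorrt-int8_dynamic.py',  # placeholder
    model_cfg='mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py',  # placeholder
    model_checkpoint='checkpoints/fcos_r50.pth',  # placeholder
    dataset_type='val',
    device='cuda:0')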
188,540 | import os.path as osp
from typing import Any, Optional, Union
import mmengine
from mmdeploy.apis.core.pipeline_manager import PIPELINE_MANAGER, no_mp
class no_mp:
"""The context manager used to disable multiprocess."""
def __init__(self, manager: PipelineManager = PIPELINE_MANAGER) -> None:
self._manager = manager
self._old_enable_multiprocess = True
def __enter__(self):
self._old_enable_multiprocess = self._manager._enable_multiprocess
self._manager._enable_multiprocess = False
def __exit__(self, type, val, tb):
self._manager._enable_multiprocess = self._old_enable_multiprocess
def trace(func: torch.nn.Module,
inputs: Union[torch.Tensor, Tuple],
output_path_prefix: Optional[str] = None,
backend: Union[Backend, str] = 'default',
input_metas: Optional[Dict] = None,
context_info: Dict = dict(),
check_trace: bool = True,
check_tolerance: float = 1e-05) -> torch.jit.TracedModule:
"""A wrapper of `torch.jit.trace` with some enhancement.
Examples:
>>> from mmdeploy.apis.torch_jit import trace
>>>
>>> func = create_model()
>>> inputs = get_input_tensor()
>>>
>>> jit_model = trace(
>>> func,
>>> inputs,
>>> backend='torchscript',
>>> check_trace=False)
>>>
Args:
func (torch.nn.Module): A Python function or `torch.nn.Module` that
will be run with `example_inputs`.
inputs (torch.Tensor, Tuple): A tuple of example inputs that will be
passed to the function while tracing.
output_path_prefix (str): The model would be serialized in
`<output_path_prefix>.pth`, None if you don't want to
save the model.
backend (Backend|str): Which backend will the graph be used. Different
backend would generate different graph.
input_metas (Dict): The constant inputs of the model.
context_info (Dict): The information that would be used in the context
of exporting.
check_trace (bool): Check if the same inputs run through traced code
produce the same outputs.
check_tolerance (float): Floating-point comparison tolerance to use in
the checker procedure.
Returns:
torch.jit.TracedModule: The traced torch jit model.
"""
logger = get_root_logger()
logger.info('Export PyTorch model to torchscript.')
def _add_or_update(cfg: dict, key: str, val: Any):
if key in cfg and isinstance(cfg[key], dict) and isinstance(val, dict):
cfg[key].update(val)
else:
cfg[key] = val
context_info = deepcopy(context_info)
deploy_cfg = context_info.pop('deploy_cfg', dict())
ir_config = dict(type='torchscript')
_add_or_update(deploy_cfg, 'ir_config', ir_config)
if isinstance(backend, Backend):
backend = backend.value
backend_config = dict(type=backend)
_add_or_update(deploy_cfg, 'backend_config', backend_config)
context_info['cfg'] = deploy_cfg
if 'backend' not in context_info:
context_info['backend'] = backend
elif context_info['backend'] != backend:
logger.warning(
f'Find backend {context_info["backend"]} in context_info.'
f' Expect {backend}.')
if 'ir' not in context_info:
context_info['ir'] = IR.TORCHSCRIPT
elif context_info['ir'] != IR.TORCHSCRIPT:
logger.warning(f'Find ir {context_info["ir"]} in context_info.'
f' Expect {IR.TORCHSCRIPT}.')
# patch model
if isinstance(func, torch.nn.Module):
ir = IR.get(get_ir_config(deploy_cfg)['type'])
func = patch_model(func, cfg=deploy_cfg, backend=backend, ir=ir)
with RewriterContext(**context_info), torch.no_grad():
# patch input_metas
if input_metas is not None:
assert isinstance(
input_metas, dict
), f'Expect input_metas type is dict, get {type(input_metas)}.'
model_forward = func.forward
func.forward = partial(func.forward, **input_metas)
# for exporting models with weight that depends on inputs
func(*inputs) if isinstance(inputs, Sequence) \
else func(inputs)
ts_model = torch.jit.trace(
func,
inputs,
check_trace=check_trace,
check_tolerance=check_tolerance)
if input_metas is not None:
func.forward = model_forward
# save model
if output_path_prefix is not None:
output_path = output_path_prefix + '.pt'
logger.info(f'Save PyTorch model: {output_path}.')
torch.jit.save(ts_model, output_path)
return ts_model
The provided code snippet includes necessary dependencies for implementing the `torch2torchscript` function. Write a Python function `def torch2torchscript(img: Any, work_dir: str, save_file: str, deploy_cfg: Union[str, mmengine.Config], model_cfg: Union[str, mmengine.Config], model_checkpoint: Optional[str] = None, device: str = 'cuda:0')` to solve the following problem:
Convert PyTorch model to torchscript model. Args: img (str | np.ndarray | torch.Tensor): Input image used to assist converting model. work_dir (str): A working directory to save files. save_file (str): Filename to save torchscript model. deploy_cfg (str | mmengine.Config): Deployment config file or Config object. model_cfg (str | mmengine.Config): Model config file or Config object. model_checkpoint (str): A checkpoint path of PyTorch model, defaults to `None`. device (str): A string specifying device type, defaults to 'cuda:0'.
Here is the function:
def torch2torchscript(img: Any,
work_dir: str,
save_file: str,
deploy_cfg: Union[str, mmengine.Config],
model_cfg: Union[str, mmengine.Config],
model_checkpoint: Optional[str] = None,
device: str = 'cuda:0'):
"""Convert PyTorch model to torchscript model.
Args:
img (str | np.ndarray | torch.Tensor): Input image used to assist
converting model.
work_dir (str): A working directory to save files.
save_file (str): Filename to save torchscript model.
deploy_cfg (str | mmengine.Config): Deployment config file or
Config object.
model_cfg (str | mmengine.Config): Model config file or Config object.
model_checkpoint (str): A checkpoint path of PyTorch model,
defaults to `None`.
device (str): A string specifying device type, defaults to 'cuda:0'.
"""
import torch
from mmdeploy.utils import get_backend, get_input_shape, load_config
from .torch_jit import trace
# load deploy_cfg if necessary
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
mmengine.mkdir_or_exist(osp.abspath(work_dir))
input_shape = get_input_shape(deploy_cfg)
from mmdeploy.apis import build_task_processor
task_processor = build_task_processor(model_cfg, deploy_cfg, device)
torch_model = task_processor.build_pytorch_model(model_checkpoint)
data, model_inputs = task_processor.create_input(
img,
input_shape,
data_preprocessor=getattr(torch_model, 'data_preprocessor', None))
data_samples = data['data_samples']
input_metas = {'data_samples': data_samples, 'mode': 'predict'}
if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:
model_inputs = model_inputs[0]
context_info = dict(deploy_cfg=deploy_cfg)
backend = get_backend(deploy_cfg).value
output_prefix = osp.join(work_dir, osp.splitext(save_file)[0])
if model_inputs.device != device:
model_inputs = model_inputs.to(device)
with no_mp():
trace(
torch_model,
model_inputs,
output_path_prefix=output_prefix,
backend=backend,
input_metas=input_metas,
context_info=context_info,
check_trace=False) | Convert PyTorch model to torchscript model. Args: img (str | np.ndarray | torch.Tensor): Input image used to assist converting model. work_dir (str): A working directory to save files. save_file (str): Filename to save torchscript model. deploy_cfg (str | mmengine.Config): Deployment config file or Config object. model_cfg (str | mmengine.Config): Model config file or Config object. model_checkpoint (str): A checkpoint path of PyTorch model, defaults to `None`. device (str): A string specifying device type, defaults to 'cuda:0'. |
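A hedged usage sketch for the converter above, mirroring the torch2onnx example further below; all paths are placeholders:
torch2torchscript(
    img='demo.jpg',
    work_dir='work_dir',
    save_file='fcos.pt',
    deploy_cfg='configs/mmdet/detection/detection_torchscript.py',  # placeholder
    model_cfg='mmdetection/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py',  # placeholder
    model_checkpoint='checkpoints/fcos_r50.pth',  # placeholder
    device='cpu')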
188,541 | from typing import Optional, Sequence, Union
import mmengine
import numpy as np
import torch
from mmdeploy.utils import Backend, get_backend, get_input_shape, load_config
The provided code snippet includes necessary dependencies for implementing the `visualize_model` function. Write a Python function `def visualize_model(model_cfg: Union[str, mmengine.Config], deploy_cfg: Union[str, mmengine.Config], model: Union[str, Sequence[str]], img: Union[str, np.ndarray, Sequence[str]], device: str, backend: Optional[Backend] = None, output_file: Optional[str] = None, show_result: bool = False, **kwargs)` to solve the following problem:
Run inference with PyTorch or backend model and show results. Examples: >>> from mmdeploy.apis import visualize_model >>> model_cfg = ('mmdetection/configs/fcos/' 'fcos_r50_caffe_fpn_gn-head_1x_coco.py') >>> deploy_cfg = ('configs/mmdet/detection/' 'detection_onnxruntime_dynamic.py') >>> model = 'work_dir/fcos.onnx' >>> img = 'demo.jpg' >>> device = 'cpu' >>> visualize_model(model_cfg, deploy_cfg, model, \ img, device, show_result=True) Args: model_cfg (str | mmengine.Config): Model config file or Config object. deploy_cfg (str | mmengine.Config): Deployment config file or Config object. model (str | Sequence[str]): Input model or file(s). img (str | np.ndarray | Sequence[str]): Input image file or numpy array for inference. device (str): A string specifying device type. backend (Backend): Specifying backend type, defaults to `None`. output_file (str): Output file to save visualized image, defaults to `None`. Only valid if `show_result` is set to `False`. show_result (bool): Whether to show plotted image in windows, defaults to `False`.
Here is the function:
def visualize_model(model_cfg: Union[str, mmengine.Config],
deploy_cfg: Union[str, mmengine.Config],
model: Union[str, Sequence[str]],
img: Union[str, np.ndarray, Sequence[str]],
device: str,
backend: Optional[Backend] = None,
output_file: Optional[str] = None,
show_result: bool = False,
**kwargs):
"""Run inference with PyTorch or backend model and show results.
Examples:
>>> from mmdeploy.apis import visualize_model
>>> model_cfg = ('mmdetection/configs/fcos/'
'fcos_r50_caffe_fpn_gn-head_1x_coco.py')
>>> deploy_cfg = ('configs/mmdet/detection/'
'detection_onnxruntime_dynamic.py')
>>> model = 'work_dir/fcos.onnx'
>>> img = 'demo.jpg'
>>> device = 'cpu'
>>> visualize_model(model_cfg, deploy_cfg, model, \
img, device, show_result=True)
Args:
model_cfg (str | mmengine.Config): Model config file or Config object.
deploy_cfg (str | mmengine.Config): Deployment config file or Config
object.
model (str | Sequence[str]): Input model or file(s).
img (str | np.ndarray | Sequence[str]): Input image file or numpy array
for inference.
device (str): A string specifying device type.
backend (Backend): Specifying backend type, defaults to `None`.
output_file (str): Output file to save visualized image, defaults to
`None`. Only valid if `show_result` is set to `False`.
show_result (bool): Whether to show plotted image in windows, defaults
to `False`.
"""
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
from mmdeploy.apis.utils import build_task_processor
task_processor = build_task_processor(model_cfg, deploy_cfg, device)
input_shape = get_input_shape(deploy_cfg)
if backend is None:
backend = get_backend(deploy_cfg)
if isinstance(model, str):
model = [model]
if isinstance(model, (list, tuple)):
assert len(model) > 0, 'Model should have at least one element.'
if backend == Backend.PYTORCH:
model = task_processor.build_pytorch_model(model[0])
else:
model = task_processor.build_backend_model(
model,
data_preprocessor_updater=task_processor.
update_data_preprocessor)
model_inputs, _ = task_processor.create_input(img, input_shape)
with torch.no_grad():
result = model.test_step(model_inputs)[0]
if show_result:
try:
# check headless
import tkinter
tkinter.Tk()
except Exception as e:
from mmdeploy.utils import get_root_logger
logger = get_root_logger()
logger.warning(
f'render and display result skipped for headless device, exception {e}' # noqa: E501
)
show_result = False
if isinstance(img, str) or not isinstance(img, Sequence):
img = [img]
for single_img in img:
task_processor.visualize(
image=single_img,
model=model,
result=result,
output_file=output_file,
window_name=backend.value,
show_result=show_result) | Run inference with PyTorch or backend model and show results. Examples: >>> from mmdeploy.apis import visualize_model >>> model_cfg = ('mmdetection/configs/fcos/' 'fcos_r50_caffe_fpn_gn-head_1x_coco.py') >>> deploy_cfg = ('configs/mmdet/detection/' 'detection_onnxruntime_dynamic.py') >>> model = 'work_dir/fcos.onnx' >>> img = 'demo.jpg' >>> device = 'cpu' >>> visualize_model(model_cfg, deploy_cfg, model, \ img, device, show_result=True) Args: model_cfg (str | mmengine.Config): Model config file or Config object. deploy_cfg (str | mmengine.Config): Deployment config file or Config object. model (str | Sequence[str]): Input model or file(s). img (str | np.ndarray | Sequence[str]): Input image file or numpy array for inference. device (str): A string specifying device type. backend (Backend): Specifying backend type, defaults to `None`. output_file (str): Output file to save visualized image, defaults to `None`. Only valid if `show_result` is set to `False`. show_result (bool): Whether to show plotted image in windows, defaults to `False`. |
188,542 | from typing import Any, Sequence, Union
import mmengine
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `inference_model` function. Write a Python function `def inference_model(model_cfg: Union[str, mmengine.Config], deploy_cfg: Union[str, mmengine.Config], backend_files: Sequence[str], img: Union[str, np.ndarray], device: str) -> Any` to solve the following problem:
Run inference with PyTorch or backend model and show results. Examples: >>> from mmdeploy.apis import inference_model >>> model_cfg = ('mmdetection/configs/fcos/' 'fcos_r50_caffe_fpn_gn-head_1x_coco.py') >>> deploy_cfg = ('configs/mmdet/detection/' 'detection_onnxruntime_dynamic.py') >>> backend_files = ['work_dir/fcos.onnx'] >>> img = 'demo.jpg' >>> device = 'cpu' >>> model_output = inference_model(model_cfg, deploy_cfg, backend_files, img, device) Args: model_cfg (str | mmengine.Config): Model config file or Config object. deploy_cfg (str | mmengine.Config): Deployment config file or Config object. backend_files (Sequence[str]): Input backend model file(s). img (str | np.ndarray): Input image file or numpy array for inference. device (str): A string specifying device type. Returns: Any: The inference results
Here is the function:
def inference_model(model_cfg: Union[str, mmengine.Config],
deploy_cfg: Union[str, mmengine.Config],
backend_files: Sequence[str], img: Union[str, np.ndarray],
device: str) -> Any:
"""Run inference with PyTorch or backend model and show results.
Examples:
>>> from mmdeploy.apis import inference_model
>>> model_cfg = ('mmdetection/configs/fcos/'
'fcos_r50_caffe_fpn_gn-head_1x_coco.py')
>>> deploy_cfg = ('configs/mmdet/detection/'
'detection_onnxruntime_dynamic.py')
>>> backend_files = ['work_dir/fcos.onnx']
>>> img = 'demo.jpg'
>>> device = 'cpu'
>>> model_output = inference_model(model_cfg, deploy_cfg,
backend_files, img, device)
Args:
model_cfg (str | mmengine.Config): Model config file or Config object.
deploy_cfg (str | mmengine.Config): Deployment config file or Config
object.
backend_files (Sequence[str]): Input backend model file(s).
img (str | np.ndarray): Input image file or numpy array for inference.
device (str): A string specifying device type.
Returns:
Any: The inference results
"""
import torch
from mmdeploy.utils import get_input_shape, load_config
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
from mmdeploy.apis.utils import build_task_processor
task_processor = build_task_processor(model_cfg, deploy_cfg, device)
model = task_processor.build_backend_model(
backend_files, task_processor.update_data_preprocessor)
input_shape = get_input_shape(deploy_cfg)
model_inputs, _ = task_processor.create_input(img, input_shape)
with torch.no_grad():
result = model.test_step(model_inputs)
return result | Run inference with PyTorch or backend model and show results. Examples: >>> from mmdeploy.apis import inference_model >>> model_cfg = ('mmdetection/configs/fcos/' 'fcos_r50_caffe_fpn_gn-head_1x_coco.py') >>> deploy_cfg = ('configs/mmdet/detection/' 'detection_onnxruntime_dynamic.py') >>> backend_files = ['work_dir/fcos.onnx'] >>> img = 'demo.jpg' >>> device = 'cpu' >>> model_output = inference_model(model_cfg, deploy_cfg, backend_files, img, device) Args: model_cfg (str | mmengine.Config): Model config file or Config object. deploy_cfg (str | mmengine.Config): Deployment config file or Config object. backend_files (Sequence[str]): Input backend model file(s). img (str | np.ndarray): Input image file or numpy array for inference. device (str): A string specifying device type. Returns: Any: The inference results |
188,543 | import os.path as osp
from typing import Any, Optional, Union
import mmengine
from .core import PIPELINE_MANAGER
class no_mp:
"""The context manager used to disable multiprocess."""
def __init__(self, manager: PipelineManager = PIPELINE_MANAGER) -> None:
self._manager = manager
self._old_enable_multiprocess = True
def __enter__(self):
self._old_enable_multiprocess = self._manager._enable_multiprocess
self._manager._enable_multiprocess = False
def __exit__(self, type, val, tb):
self._manager._enable_multiprocess = self._old_enable_multiprocess
def export(model: torch.nn.Module,
args: Union[torch.Tensor, Tuple, Dict],
output_path_prefix: str,
backend: Union[Backend, str] = 'default',
input_metas: Optional[Dict] = None,
context_info: Dict = dict(),
input_names: Optional[Sequence[str]] = None,
output_names: Optional[Sequence[str]] = None,
opset_version: int = 11,
dynamic_axes: Optional[Dict] = None,
verbose: bool = False,
keep_initializers_as_inputs: Optional[bool] = None,
optimize: bool = False):
"""Export a PyTorch model into ONNX format. This is a wrap of
`torch.onnx.export` with some enhancement.
Examples:
>>> from mmdeploy.apis.onnx import export
>>>
>>> model = create_model()
>>> args = get_input_tensor()
>>>
>>> export(
>>> model,
>>> args,
>>> 'place/to/save/model',
>>> backend='tensorrt',
>>> input_names=['input'],
>>> output_names=['output'],
>>> dynamic_axes={'input': {
>>> 0: 'batch',
>>> 2: 'height',
>>> 3: 'width'
>>> }})
Args:
model (torch.nn.Module): the model to be exported.
args (torch.Tensor|Tuple|Dict): Dummy input of the model.
output_path_prefix (str): The output file prefix. The model will
be saved to `<output_path_prefix>.onnx`.
backend (Backend|str): Which backend will the graph be used. Different
backend would generate different graph.
input_metas (Dict): The constant inputs of the model.
context_info (Dict): The information that would be used in the context
of exporting.
input_names (Sequence[str]): The input names of the model.
output_names (Sequence[str]): The output names of the model.
opset_version (int): The version of ONNX opset version. 11 as default.
dynamic_axes (Dict): The information used to determine which axes are
dynamic.
verbose (bool): Enable verbose model on `torch.onnx.export`.
keep_initializers_as_inputs (bool): Whether we should add inputs for
each initializer.
optimize (bool): Perform optimize on model.
"""
output_path = output_path_prefix + '.onnx'
logger = get_root_logger()
logger.info(f'Export PyTorch model to ONNX: {output_path}.')
def _add_or_update(cfg: dict, key: str, val: Any):
if key in cfg and isinstance(cfg[key], dict) and isinstance(val, dict):
cfg[key].update(val)
else:
cfg[key] = val
context_info = deepcopy(context_info)
deploy_cfg = context_info.pop('deploy_cfg', dict())
ir_config = dict(
type='onnx',
input_names=input_names,
output_names=output_names,
opset_version=opset_version,
dynamic_axes=dynamic_axes,
verbose=verbose,
keep_initializers_as_inputs=keep_initializers_as_inputs)
_add_or_update(deploy_cfg, 'ir_config', ir_config)
ir = IR.get(get_ir_config(deploy_cfg)['type'])
if isinstance(backend, Backend):
backend = backend.value
backend_config = dict(type=backend)
_add_or_update(deploy_cfg, 'backend_config', backend_config)
context_info['cfg'] = deploy_cfg
context_info['ir'] = ir
if 'backend' not in context_info:
context_info['backend'] = backend
if 'opset' not in context_info:
context_info['opset'] = opset_version
# patch model
patched_model = patch_model(model, cfg=deploy_cfg, backend=backend, ir=ir)
if 'onnx_custom_passes' not in context_info:
onnx_custom_passes = optimize_onnx if optimize else None
context_info['onnx_custom_passes'] = onnx_custom_passes
with RewriterContext(**context_info), torch.no_grad():
# patch input_metas
if input_metas is not None:
assert isinstance(
input_metas, dict
), f'Expect input_metas type is dict, get {type(input_metas)}.'
model_forward = patched_model.forward
def wrap_forward(forward):
def wrapper(*arg, **kwargs):
return forward(*arg, **kwargs)
return wrapper
patched_model.forward = wrap_forward(patched_model.forward)
patched_model.forward = partial(patched_model.forward,
**input_metas)
# force to export on cpu
patched_model = patched_model.cpu()
if isinstance(args, torch.Tensor):
args = args.cpu()
elif isinstance(args, (tuple, list)):
args = tuple([_.cpu() for _ in args])
else:
raise RuntimeError(f'Not supported args: {args}')
torch.onnx.export(
patched_model,
args,
output_path,
export_params=True,
input_names=input_names,
output_names=output_names,
opset_version=opset_version,
dynamic_axes=dynamic_axes,
keep_initializers_as_inputs=keep_initializers_as_inputs,
verbose=verbose)
if input_metas is not None:
patched_model.forward = model_forward
The provided code snippet includes necessary dependencies for implementing the `torch2onnx` function. Write a Python function `def torch2onnx(img: Any, work_dir: str, save_file: str, deploy_cfg: Union[str, mmengine.Config], model_cfg: Union[str, mmengine.Config], model_checkpoint: Optional[str] = None, device: str = 'cuda:0')` to solve the following problem:
Convert PyTorch model to ONNX model. Examples: >>> from mmdeploy.apis import torch2onnx >>> img = 'demo.jpg' >>> work_dir = 'work_dir' >>> save_file = 'fcos.onnx' >>> deploy_cfg = ('configs/mmdet/detection/' 'detection_onnxruntime_dynamic.py') >>> model_cfg = ('mmdetection/configs/fcos/' 'fcos_r50_caffe_fpn_gn-head_1x_coco.py') >>> model_checkpoint = ('checkpoints/' 'fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth') >>> device = 'cpu' >>> torch2onnx(img, work_dir, save_file, deploy_cfg, \ model_cfg, model_checkpoint, device) Args: img (str | np.ndarray | torch.Tensor): Input image used to assist converting model. work_dir (str): A working directory to save files. save_file (str): Filename to save onnx model. deploy_cfg (str | mmengine.Config): Deployment config file or Config object. model_cfg (str | mmengine.Config): Model config file or Config object. model_checkpoint (str): A checkpoint path of PyTorch model, defaults to `None`. device (str): A string specifying device type, defaults to 'cuda:0'.
Here is the function:
def torch2onnx(img: Any,
work_dir: str,
save_file: str,
deploy_cfg: Union[str, mmengine.Config],
model_cfg: Union[str, mmengine.Config],
model_checkpoint: Optional[str] = None,
device: str = 'cuda:0'):
"""Convert PyTorch model to ONNX model.
Examples:
>>> from mmdeploy.apis import torch2onnx
>>> img = 'demo.jpg'
>>> work_dir = 'work_dir'
>>> save_file = 'fcos.onnx'
>>> deploy_cfg = ('configs/mmdet/detection/'
'detection_onnxruntime_dynamic.py')
>>> model_cfg = ('mmdetection/configs/fcos/'
'fcos_r50_caffe_fpn_gn-head_1x_coco.py')
>>> model_checkpoint = ('checkpoints/'
'fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth')
>>> device = 'cpu'
>>> torch2onnx(img, work_dir, save_file, deploy_cfg, \
model_cfg, model_checkpoint, device)
Args:
img (str | np.ndarray | torch.Tensor): Input image used to assist
converting model.
work_dir (str): A working directory to save files.
save_file (str): Filename to save onnx model.
deploy_cfg (str | mmengine.Config): Deployment config file or
Config object.
model_cfg (str | mmengine.Config): Model config file or Config object.
model_checkpoint (str): A checkpoint path of PyTorch model,
defaults to `None`.
device (str): A string specifying device type, defaults to 'cuda:0'.
"""
from mmdeploy.apis.core.pipeline_manager import no_mp
from mmdeploy.utils import (Backend, get_backend, get_dynamic_axes,
get_input_shape, get_onnx_config, load_config)
from .onnx import export
# load deploy_cfg if necessary
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
mmengine.mkdir_or_exist(osp.abspath(work_dir))
input_shape = get_input_shape(deploy_cfg)
# create model an inputs
from mmdeploy.apis import build_task_processor
task_processor = build_task_processor(model_cfg, deploy_cfg, device)
torch_model = task_processor.build_pytorch_model(model_checkpoint)
data, model_inputs = task_processor.create_input(
img,
input_shape,
data_preprocessor=getattr(torch_model, 'data_preprocessor', None))
if isinstance(model_inputs, list) and len(model_inputs) == 1:
model_inputs = model_inputs[0]
data_samples = data['data_samples']
input_metas = {'data_samples': data_samples, 'mode': 'predict'}
# export to onnx
context_info = dict()
context_info['deploy_cfg'] = deploy_cfg
output_prefix = osp.join(work_dir,
osp.splitext(osp.basename(save_file))[0])
backend = get_backend(deploy_cfg).value
onnx_cfg = get_onnx_config(deploy_cfg)
opset_version = onnx_cfg.get('opset_version', 11)
input_names = onnx_cfg['input_names']
output_names = onnx_cfg['output_names']
axis_names = input_names + output_names
dynamic_axes = get_dynamic_axes(deploy_cfg, axis_names)
verbose = not onnx_cfg.get('strip_doc_string', True) or onnx_cfg.get(
'verbose', False)
keep_initializers_as_inputs = onnx_cfg.get('keep_initializers_as_inputs',
True)
optimize = onnx_cfg.get('optimize', False)
if backend == Backend.NCNN.value:
"""NCNN backend needs a precise blob counts, while using onnx optimizer
will merge duplicate initilizers without reference count."""
optimize = False
with no_mp():
export(
torch_model,
model_inputs,
input_metas=input_metas,
output_path_prefix=output_prefix,
backend=backend,
input_names=input_names,
output_names=output_names,
context_info=context_info,
opset_version=opset_version,
dynamic_axes=dynamic_axes,
verbose=verbose,
keep_initializers_as_inputs=keep_initializers_as_inputs,
optimize=optimize) | Convert PyTorch model to ONNX model. Examples: >>> from mmdeploy.apis import torch2onnx >>> img = 'demo.jpg' >>> work_dir = 'work_dir' >>> save_file = 'fcos.onnx' >>> deploy_cfg = ('configs/mmdet/detection/' 'detection_onnxruntime_dynamic.py') >>> model_cfg = ('mmdetection/configs/fcos/' 'fcos_r50_caffe_fpn_gn-head_1x_coco.py') >>> model_checkpoint = ('checkpoints/' 'fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth') >>> device = 'cpu' >>> torch2onnx(img, work_dir, save_file, deploy_cfg, \ model_cfg, model_checkpoint, device) Args: img (str | np.ndarray | torch.Tensor): Input image used to assist converting model. work_dir (str): A working directory to save files. save_file (str): Filename to save onnx model. deploy_cfg (str | mmengine.Config): Deployment config file or Config object. model_cfg (str | mmengine.Config): Model config file or Config object. model_checkpoint (str): A checkpoint path of PyTorch model, defaults to `None`. device (str): A string specifying device type, defaults to 'cuda:0'. |
188,544 | import importlib
import inspect
import logging
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
from mmdeploy.utils import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `_get_func_name` function. Write a Python function `def _get_func_name(func: Callable) -> str` to solve the following problem:
get function name.
Here is the function:
def _get_func_name(func: Callable) -> str:
"""get function name."""
assert isinstance(func, Callable), f'{func} is not a Callable object.'
_func_name = None
if hasattr(func, '__qualname__'):
_func_name = f'{func.__module__}.{func.__qualname__}'
elif hasattr(func, '__class__'):
_func_name = func.__class__
else:
_func_name = str(func)
return _func_name | get function name. |
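For example, the helper above resolves a local function and a builtin to fully qualified names:
def dummy():
    pass
print(_get_func_name(dummy))   # e.g. '__main__.dummy'
print(_get_func_name(max))     # 'builtins.max'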
188,545 | from typing import Dict, Iterable, Optional, Union
import onnx
import onnx.helper
import onnx.utils
from mmdeploy.apis.core import PIPELINE_MANAGER
from mmdeploy.core.optimizers import (attribute_to_dict, create_extractor,
get_new_name, parse_extractor_io_string,
remove_identity, remove_imports,
rename_value)
from mmdeploy.utils import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `extract_partition` function. Write a Python function `def extract_partition(model: Union[str, onnx.ModelProto], start_marker: Union[str, Iterable[str]], end_marker: Union[str, Iterable[str]], start_name_map: Optional[Dict[str, str]] = None, end_name_map: Optional[Dict[str, str]] = None, dynamic_axes: Optional[Dict[str, Dict[int, str]]] = None, save_file: Optional[str] = None) -> onnx.ModelProto` to solve the following problem:
Extract partition-model from an ONNX model. The partition-model is defined by the names of the input and output tensors exactly. Examples: >>> from mmdeploy.apis import extract_model >>> model = 'work_dir/fastrcnn.onnx' >>> start_marker = 'detector:input' >>> end_marker = ['extract_feat:output', 'multiclass_nms[0]:input'] >>> dynamic_axes = { 'input': { 0: 'batch', 2: 'height', 3: 'width' }, 'scores': { 0: 'batch', 1: 'num_boxes', }, 'boxes': { 0: 'batch', 1: 'num_boxes', } } >>> save_file = 'partition_model.onnx' >>> extract_partition(model, start_marker, end_marker, \ dynamic_axes=dynamic_axes, \ save_file=save_file) Args: model (str | onnx.ModelProto): Input ONNX model to be extracted. start_marker (str | Sequence[str]): Start marker(s) to extract. end_marker (str | Sequence[str]): End marker(s) to extract. start_name_map (Dict[str, str]): A mapping of start names, defaults to `None`. end_name_map (Dict[str, str]): A mapping of end names, defaults to `None`. dynamic_axes (Dict[str, Dict[int, str]]): A dictionary to specify dynamic axes of input/output, defaults to `None`. save_file (str): A file to save the extracted model, defaults to `None`. Returns: onnx.ModelProto: The extracted model.
Here is the function:
def extract_partition(model: Union[str, onnx.ModelProto],
start_marker: Union[str, Iterable[str]],
end_marker: Union[str, Iterable[str]],
start_name_map: Optional[Dict[str, str]] = None,
end_name_map: Optional[Dict[str, str]] = None,
dynamic_axes: Optional[Dict[str, Dict[int, str]]] = None,
save_file: Optional[str] = None) -> onnx.ModelProto:
"""Extract partition-model from an ONNX model.
The partition-model is defined by the names of the input and output tensors
exactly.
Examples:
>>> from mmdeploy.apis import extract_model
>>> model = 'work_dir/fastrcnn.onnx'
>>> start_marker = 'detector:input'
>>> end_marker = ['extract_feat:output', 'multiclass_nms[0]:input']
>>> dynamic_axes = {
'input': {
0: 'batch',
2: 'height',
3: 'width'
},
'scores': {
0: 'batch',
1: 'num_boxes',
},
'boxes': {
0: 'batch',
1: 'num_boxes',
}
}
>>> save_file = 'partition_model.onnx'
>>> extract_partition(model, start_marker, end_marker, \
dynamic_axes=dynamic_axes, \
save_file=save_file)
Args:
model (str | onnx.ModelProto): Input ONNX model to be extracted.
start_marker (str | Sequence[str]): Start marker(s) to extract.
end_marker (str | Sequence[str]): End marker(s) to extract.
start_name_map (Dict[str, str]): A mapping of start names, defaults to
`None`.
end_name_map (Dict[str, str]): A mapping of end names, defaults to
`None`.
dynamic_axes (Dict[str, Dict[int, str]]): A dictionary to specify
dynamic axes of input/output, defaults to `None`.
save_file (str): A file to save the extracted model, defaults to
`None`.
Returns:
onnx.ModelProto: The extracted model.
"""
if isinstance(model, str):
model = onnx.load(model)
num_value_info = len(model.graph.value_info)
inputs = []
outputs = []
logger = get_root_logger()
if not isinstance(start_marker, (list, tuple)):
start_marker = [start_marker]
for s in start_marker:
start_name, func_id, start_type = parse_extractor_io_string(s)
for node in model.graph.node:
if node.op_type == 'Mark':
attr = attribute_to_dict(node.attribute)
if attr['func'] == start_name and attr[
'type'] == start_type and attr['func_id'] == func_id:
name = node.input[0]
if name not in inputs:
new_name = get_new_name(
attr, mark_name=s, name_map=start_name_map)
rename_value(model, name, new_name)
if not any([
v_info.name == new_name
for v_info in model.graph.value_info
]):
new_val_info = onnx.helper.make_tensor_value_info(
new_name, attr['dtype'], attr['shape'])
model.graph.value_info.append(new_val_info)
inputs.append(new_name)
logger.info(f'inputs: {", ".join(inputs)}')
# collect outputs
if not isinstance(end_marker, (list, tuple)):
end_marker = [end_marker]
for e in end_marker:
end_name, func_id, end_type = parse_extractor_io_string(e)
for node in model.graph.node:
if node.op_type == 'Mark':
attr = attribute_to_dict(node.attribute)
if attr['func'] == end_name and attr[
'type'] == end_type and attr['func_id'] == func_id:
name = node.output[0]
if name not in outputs:
new_name = get_new_name(
attr, mark_name=e, name_map=end_name_map)
rename_value(model, name, new_name)
if not any([
v_info.name == new_name
for v_info in model.graph.value_info
]):
new_val_info = onnx.helper.make_tensor_value_info(
new_name, attr['dtype'], attr['shape'])
model.graph.value_info.append(new_val_info)
outputs.append(new_name)
logger.info(f'outputs: {", ".join(outputs)}')
# replace Mark with Identity
for node in model.graph.node:
if node.op_type == 'Mark':
del node.attribute[:]
node.domain = ''
node.op_type = 'Identity'
extractor = create_extractor(model)
extracted_model = extractor.extract_model(inputs, outputs)
# remove all Identity, this may be done by onnx simplifier
remove_identity(extracted_model)
# collect all used inputs
used = set()
for node in extracted_model.graph.node:
for input in node.input:
used.add(input)
for output in extracted_model.graph.output:
used.add(output.name)
# delete unused inputs
success = True
while success:
success = False
for i, input in enumerate(extracted_model.graph.input):
if input.name not in used:
del extracted_model.graph.input[i]
success = True
break
# eliminate output without shape
for xs in [extracted_model.graph.output]:
for x in xs:
if not x.type.tensor_type.shape.dim:
logger.info(f'fixing output shape: {x.name}')
x.CopyFrom(
onnx.helper.make_tensor_value_info(
x.name, x.type.tensor_type.elem_type, []))
# eliminate 0-batch dimension, dirty workaround for two-stage detectors
for input in extracted_model.graph.input:
if input.name in inputs:
if input.type.tensor_type.shape.dim[0].dim_value == 0:
input.type.tensor_type.shape.dim[0].dim_value = 1
# eliminate duplicated value_info for inputs
success = True
# num_value_info == 0 if dynamic shape
if num_value_info == 0:
while len(extracted_model.graph.value_info) > 0:
extracted_model.graph.value_info.pop()
while success:
success = False
for i, x in enumerate(extracted_model.graph.value_info):
if x.name in inputs:
del extracted_model.graph.value_info[i]
success = True
break
# dynamic shape support
if dynamic_axes is not None:
for input_node in extracted_model.graph.input:
if input_node.name in dynamic_axes:
axes = dynamic_axes[input_node.name]
for k, v in axes.items():
input_node.type.tensor_type.shape.dim[k].dim_value = 0
input_node.type.tensor_type.shape.dim[k].dim_param = v
for output_node in extracted_model.graph.output:
for idx, dim in enumerate(output_node.type.tensor_type.shape.dim):
dim.dim_value = 0
dim.dim_param = f'dim_{idx}'
# remove mmdeploy domain if useless
remove_imports(extracted_model)
# save extract_model if save_file is given
if save_file is not None:
onnx.save(extracted_model, save_file)
return extracted_model | Extract partition-model from an ONNX model. The partition-model is defined by the names of the input and output tensors exactly. Examples: >>> from mmdeploy.apis import extract_model >>> model = 'work_dir/fastrcnn.onnx' >>> start_marker = 'detector:input' >>> end_marker = ['extract_feat:output', 'multiclass_nms[0]:input'] >>> dynamic_axes = { 'input': { 0: 'batch', 2: 'height', 3: 'width' }, 'scores': { 0: 'batch', 1: 'num_boxes', }, 'boxes': { 0: 'batch', 1: 'num_boxes', } } >>> save_file = 'partition_model.onnx' >>> extract_partition(model, start_marker, end_marker, \ dynamic_axes=dynamic_axes, \ save_file=save_file) Args: model (str | onnx.ModelProto): Input ONNX model to be extracted. start_marker (str | Sequence[str]): Start marker(s) to extract. end_marker (str | Sequence[str]): End marker(s) to extract. start_name_map (Dict[str, str]): A mapping of start names, defaults to `None`. end_name_map (Dict[str, str]): A mapping of end names, defaults to `None`. dynamic_axes (Dict[str, Dict[int, str]]): A dictionary to specify dynamic axes of input/output, defaults to `None`. save_file (str): A file to save the extracted model, defaults to `None`. Returns: onnx.ModelProto: The extracted model. |
188,546 | from typing import Callable
import torch
from mmdeploy.core import FUNCTION_REWRITER
def update_squeeze_unsqueeze_opset13_pass(graph, params_dict, torch_out):
"""Update Squeeze/Unsqueeze axes for opset13."""
for node in graph.nodes():
if node.kind() in ['onnx::Squeeze', 'onnx::Unsqueeze'] and \
node.hasAttribute('axes'):
axes = node['axes']
axes_node = graph.create('onnx::Constant')
axes_node.t_('value', torch.LongTensor(axes))
node.removeAttribute('axes')
node.addInput(axes_node.output())
axes_node.insertBefore(node)
return graph, params_dict, torch_out
The provided code snippet includes necessary dependencies for implementing the `model_to_graph__custom_optimizer` function. Write a Python function `def model_to_graph__custom_optimizer(*args, **kwargs)` to solve the following problem:
Rewriter of _model_to_graph, add custom passes.
Here is the function:
def model_to_graph__custom_optimizer(*args, **kwargs):
"""Rewriter of _model_to_graph, add custom passes."""
ctx = FUNCTION_REWRITER.get_context()
graph, params_dict, torch_out = ctx.origin_func(*args, **kwargs)
if hasattr(ctx, 'opset'):
opset_version = ctx.opset
else:
from mmdeploy.utils import get_ir_config
opset_version = get_ir_config(ctx.cfg).get('opset_version', 11)
if opset_version >= 13:
graph, params_dict, torch_out = update_squeeze_unsqueeze_opset13_pass(
graph, params_dict, torch_out)
custom_passes = getattr(ctx, 'onnx_custom_passes', None)
if custom_passes is not None:
assert isinstance(
custom_passes, Callable
), f'Expect a callable onnx_custom_passes, get {type(custom_passes)}.'
graph, params_dict, torch_out = custom_passes(ctx, graph, params_dict,
torch_out)
return graph, params_dict, torch_out | Rewriter of _model_to_graph, add custom passes. |
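In mmdeploy's rewriter system such a function is attached to the original torch export path with a registration decorator; a minimal sketch, assuming the usual target name torch.onnx.utils._model_to_graph:
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.onnx.utils._model_to_graph')  # assumed rewrite target
def model_to_graph__custom_optimizer(*args, **kwargs):
    ...  # body as defined above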
188,547 | from typing import Callable
import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `jit_pass_onnx_deduplicate_initializers__disable` function. Write a Python function `def jit_pass_onnx_deduplicate_initializers__disable(graph, param_dict, arg2)` to solve the following problem:
This pass will disable TensorRT topk export. disable for TensorRT.
Here is the function:
def jit_pass_onnx_deduplicate_initializers__disable(graph, param_dict, arg2):
"""This pass will disable TensorRT topk export.
disable for TensorRT.
"""
return param_dict | This pass will disable TensorRT topk export. disable for TensorRT. |
188,548 | from typing import Callable
import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `jit_pass_onnx_autograd_function_process__disable` function. Write a Python function `def jit_pass_onnx_autograd_function_process__disable(graph)` to solve the following problem:
Disable process autograph function.
Here is the function:
def jit_pass_onnx_autograd_function_process__disable(graph):
"""Disable process autograph function."""
return | Disable process autograph function. |
188,549 | from typing import Dict, List
import mmengine
from mmdeploy.backend.openvino import ModelOptimizerOptions
from mmdeploy.utils import get_model_inputs
from mmdeploy.utils.config_utils import get_backend_config, get_ir_config
def update_input_names(input_info: Dict[str, List],
input_names: List[str]) -> Dict[str, List]:
"""Replaces the default input name in 'input_info' with the value from the
deployment config, if they differ.
Args:
input_info (Dict[str, List]): Names and shapes of input.
input_names (List[str]): Input names from the deployment config.
Returns:
Dict[str, List]: A dict that stores the names and shapes of input.
"""
input_info_keys = set(input_info.keys())
input_names = set(input_names)
if input_info_keys != input_names:
old_names = input_info_keys - input_names
new_names = input_names - input_info_keys
for new_key, old_key in zip(new_names, old_names):
input_info[new_key] = input_info.pop(old_key)
return input_info
def get_ir_config(deploy_cfg: Union[str, mmengine.Config]) -> Dict:
"""Get the IR parameters in export() from config.
Args:
deploy_cfg (str | mmengine.Config): The path or content of config.
Returns:
Dict: The config dictionary of IR parameters
"""
deploy_cfg = load_config(deploy_cfg)[0]
ir_config = deploy_cfg.get('ir_config', None)
if ir_config is None:
# TODO: deprecate in future
ir_config = deploy_cfg.get('onnx_config', {})
return ir_config
The provided code snippet includes necessary dependencies for implementing the `get_input_info_from_cfg` function. Write a Python function `def get_input_info_from_cfg(deploy_cfg: mmengine.Config) -> Dict[str, List]` to solve the following problem:
Get the input names and shapes from the configs for OpenVINO Model Optimizer. Args: deploy_cfg (mmengine.Config): Deployment config. Returns: Dict[str, List]: A dict that stores the names and shapes of input.
Here is the function:
def get_input_info_from_cfg(deploy_cfg: mmengine.Config) -> Dict[str, List]:
"""Get the input names and shapes from the configs for OpenVINO Model
Optimizer.
Args:
deploy_cfg (mmengine.Config): Deployment config.
Returns:
Dict[str, List]: A dict that stores the names and shapes of input.
"""
# The partition is not supported now. Set the id of model to 0.
model_inputs = get_model_inputs(deploy_cfg)[0]
input_info = model_inputs['opt_shapes']
ir_config = get_ir_config(deploy_cfg)
if ir_config is not None:
input_names = ir_config.get('input_names', None)
if input_names:
if not isinstance(input_info, Dict):
input_info = dict(zip(input_names, input_info))
input_info = update_input_names(input_info, input_names)
return input_info | Get the input names and shapes from the configs for OpenVINO Model Optimizer. Args: deploy_cfg (mmengine.Config): Deployment config. Returns: Dict[str, List]: A dict that stores the names and shapes of input. |
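A hedged illustration of the config fields read above; the `backend_config.model_inputs` layout mirrors OpenVINO deploy configs and `get_model_inputs` is assumed to read it, so treat the exact keys as assumptions:
deploy_cfg = mmengine.Config(
    dict(
        backend_config=dict(
            model_inputs=[dict(opt_shapes=dict(input=[1, 3, 224, 224]))]),
        ir_config=dict(input_names=['input'])))
# Expected result: {'input': [1, 3, 224, 224]}
print(get_input_info_from_cfg(deploy_cfg))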
188,550 | from typing import Dict, List
import mmengine
from mmdeploy.backend.openvino import ModelOptimizerOptions
from mmdeploy.utils import get_model_inputs
from mmdeploy.utils.config_utils import get_backend_config, get_ir_config
def get_backend_config(deploy_cfg: Union[str, mmengine.Config]) -> Dict:
"""Get the backend_config from the config.
Args:
deploy_cfg (str | mmengine.Config): The path or content of config.
Returns:
Dict : backend config dict.
"""
deploy_cfg = load_config(deploy_cfg)[0]
backend_config = deploy_cfg.get('backend_config', {})
return backend_config
The provided code snippet includes necessary dependencies for implementing the `get_mo_options_from_cfg` function. Write a Python function `def get_mo_options_from_cfg( deploy_cfg: mmengine.Config) -> ModelOptimizerOptions` to solve the following problem:
Get additional parameters for the Model Optimizer from the deploy config. Args: deploy_cfg (mmengine.Config): Deployment config. Returns: ModelOptimizerOptions: A class that will contain additional arguments.
Here is the function:
def get_mo_options_from_cfg(
deploy_cfg: mmengine.Config) -> ModelOptimizerOptions:
"""Get additional parameters for the Model Optimizer from the deploy
config.
Args:
deploy_cfg (mmengine.Config): Deployment config.
Returns:
ModelOptimizerOptions: A class that will contain additional arguments.
"""
backend_config = get_backend_config(deploy_cfg)
mo_options = backend_config.get('mo_options', None)
mo_options = ModelOptimizerOptions(mo_options)
return mo_options | Get additional parameters for the Model Optimizer from the deploy config. Args: deploy_cfg (mmengine.Config): Deployment config. Returns: ModelOptimizerOptions: A class that will contain additional arguments. |
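As a sketch, `mo_options` would typically sit inside the backend config; the `args`/`flags` keys follow the layout ModelOptimizerOptions is documented to accept and should be treated as assumptions here:
deploy_cfg = mmengine.Config(
    dict(
        backend_config=dict(
            type='openvino',
            mo_options=dict(
                args={'--data_type': 'FP16'},    # assumed key layout
                flags=['--disable_fusing']))))   # assumed key layout
mo_options = get_mo_options_from_cfg(deploy_cfg)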
188,551 | from copy import deepcopy
from typing import Callable, Dict, Optional
import torch
from torch.utils.data import DataLoader
from ..core import PIPELINE_MANAGER
The provided code snippet includes necessary dependencies for implementing the `create_calib_input_data` function. Write a Python function `def create_calib_input_data(calib_file: str, model: torch.nn.Module, dataloader: DataLoader, get_tensor_func: Optional[Callable] = None, inference_func: Optional[Callable] = None, model_partition: bool = False, context_info: Dict = dict(), device: str = 'cpu') -> None` to solve the following problem:
Create calibration table. Examples: >>> from mmdeploy.apis.utils import create_calib_input_data >>> from mmdeploy.utils import get_calib_filename, load_config >>> deploy_cfg = 'configs/mmdet/detection/' 'detection_tensorrt-int8_dynamic-320x320-1344x1344.py' >>> deploy_cfg = load_config(deploy_cfg)[0] >>> calib_file = get_calib_filename(deploy_cfg) >>> model_cfg = 'mmdetection/configs/fcos/' 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' >>> model_checkpoint = 'checkpoints/' 'fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth' >>> create_calib_input_data(calib_file, deploy_cfg, model_cfg, model_checkpoint, device='cuda:0') Args: calib_file (str): Input calibration file. deploy_cfg (str | mmengine.Config): Deployment config. model_cfg (str | mmengine.Config): The model config. model_checkpoint (str): PyTorch model checkpoint, defaults to `None`. dataset_cfg (str | mmengine.Config): Dataset config, defaults to `None` dataset_type (str): A string specifying dataset type, e.g.: 'test', 'val', defaults to 'val'. device (str): Specifying the device to run on, defaults to 'cpu'.
Here is the function:
def create_calib_input_data(calib_file: str,
model: torch.nn.Module,
dataloader: DataLoader,
get_tensor_func: Optional[Callable] = None,
inference_func: Optional[Callable] = None,
model_partition: bool = False,
context_info: Dict = dict(),
device: str = 'cpu') -> None:
"""Create calibration table.
Examples:
    >>> from mmdeploy.apis.utils import create_calib_input_data
    >>> # `model` is the PyTorch module to calibrate and `dataloader`
    >>> # yields the calibration batches.
    >>> create_calib_input_data('calib_data.h5', model, dataloader,
    ...                         device='cuda:0')
Args:
    calib_file (str): Path of the HDF5 file the calibration data is
        written to.
    model (torch.nn.Module): The PyTorch model to run calibration with.
    dataloader (DataLoader): Dataloader that provides the calibration
        batches.
    get_tensor_func (Callable): Optional function that extracts the input
        tensor from a batch, defaults to `None`.
    inference_func (Callable): Optional function that runs the model on a
        batch, defaults to `None`.
    model_partition (bool): Whether calibration data is collected for a
        partitioned model, defaults to `False`.
    context_info (Dict): Extra context passed to the rewriter when
        `model_partition` is enabled, defaults to an empty dict.
    device (str): Specifying the device to run on, defaults to 'cpu'.
"""
import h5py
import tqdm
from mmdeploy.core import RewriterContext, reset_mark_function_count
backend = 'default'
with h5py.File(calib_file, mode='w') as file:
calib_data_group = file.create_group('calib_data')
if not model_partition:
# create end2end group
input_data_group = calib_data_group.create_group('end2end')
input_group = input_data_group.create_group('input')
for data_id, input_data in enumerate(tqdm.tqdm(dataloader)):
if not model_partition:
# save end2end data
if get_tensor_func is not None:
input_tensor = get_tensor_func(input_data)
else:
input_tensor = input_data
input_ndarray = input_tensor.detach().cpu().numpy()
input_group.create_dataset(
str(data_id),
shape=input_ndarray.shape,
compression='gzip',
compression_opts=4,
data=input_ndarray)
else:
context_info_ = deepcopy(context_info)
if 'cfg' not in context_info:
context_info_['cfg'] = dict()
context_info_['backend'] = backend
context_info_['create_calib'] = True
context_info_['calib_file'] = file
context_info_['data_id'] = data_id
with torch.no_grad(), RewriterContext(**context_info_):
reset_mark_function_count()
if inference_func is not None:
inference_func(model, input_data)
else:
model(input_data)
file.flush() | Create calibration table. Examples: >>> from mmdeploy.apis.utils import create_calib_input_data >>> from mmdeploy.utils import get_calib_filename, load_config >>> deploy_cfg = 'configs/mmdet/detection/' 'detection_tensorrt-int8_dynamic-320x320-1344x1344.py' >>> deploy_cfg = load_config(deploy_cfg)[0] >>> calib_file = get_calib_filename(deploy_cfg) >>> model_cfg = 'mmdetection/configs/fcos/' 'fcos_r50_caffe_fpn_gn-head_1x_coco.py' >>> model_checkpoint = 'checkpoints/' 'fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth' >>> create_calib_input_data(calib_file, deploy_cfg, model_cfg, model_checkpoint, device='cuda:0') Args: calib_file (str): Input calibration file. deploy_cfg (str | mmengine.Config): Deployment config. model_cfg (str | mmengine.Config): The model config. model_checkpoint (str): PyTorch model checkpoint, defaults to `None`. dataset_cfg (str | mmengine.Config): Dataset config, defaults to `None` dataset_type (str): A string specifying dataset type, e.g.: 'test', 'val', defaults to 'val'. device (str): Specifying the device to run on, defaults to 'cpu'. |
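A minimal, hedged usage sketch that matches the signature above; the tiny model and dataloader are stand-ins, and real calibration would use the deployed model and its validation data:
import torch
from torch.utils.data import DataLoader, TensorDataset

model = torch.nn.Conv2d(3, 8, 3)
dataloader = DataLoader(TensorDataset(torch.rand(4, 3, 32, 32)), batch_size=1)

create_calib_input_data(
    'calib_data.h5',
    model,
    dataloader,
    # TensorDataset batches are 1-tuples, so pick the tensor out of the batch.
    get_tensor_func=lambda batch: batch[0],
    device='cpu')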
188,552 | import logging
from typing import Any, Optional, Sequence
import mmengine
from mmdeploy.codebase import BaseTask, get_codebase_class, import_codebase
from mmdeploy.utils import (get_backend, get_codebase, get_task_type,
parse_device_id)
from mmdeploy.utils.config_utils import get_codebase_external_module
from ..core import PIPELINE_MANAGER
def check_backend_device(deploy_cfg: mmengine.Config, device: str):
"""Check if device is appropriate for the backend.
Args:
deploy_cfg (str | mmengine.Config): Deployment config file.
device (str): A string specifying device type.
"""
backend = get_backend(deploy_cfg).value
device_id = parse_device_id(device)
mismatch = dict(
tensorrt=lambda id: id == -1,
openvino=lambda id: id > -1,
)
if backend in mismatch and mismatch[backend](device_id):
raise ValueError(f'{device} is invalid for the backend {backend}')
def import_codebase(codebase_type: Codebase, custom_module_list: List = []):
"""Import a codebase package in `mmdeploy.codebase`
The function will check if all dependent libraries are installed.
For example, to import `mmdeploy.codebase.mmdet`, `mmdet` must be
installed. To import `mmdeploy.codebase.mmocr`, `mmdet` and `mmocr`
must be installed.
Args:
codebase (Codebase): The codebase to import.
"""
import importlib
codebase_name = codebase_type.value
dependent_library = [codebase_name] + \
extra_dependent_library.get(codebase_type, [])
for lib in dependent_library + custom_module_list:
if not importlib.util.find_spec(lib):
raise ImportError(f'{lib} has not been installed. '
f'Import {lib} failed.')
if len(custom_module_list) > 0:
for custom_module in custom_module_list:
importlib.import_module(f'{custom_module}')
codebase = get_codebase_class(codebase_type)
codebase.register_all_modules()
def get_codebase_external_module(
deploy_cfg: Union[str, mmengine.Config]) -> List:
return get_codebase_config(deploy_cfg).get('module', [])
The provided code snippet includes necessary dependencies for implementing the `build_task_processor` function. Write a Python function `def build_task_processor(model_cfg: mmengine.Config, deploy_cfg: mmengine.Config, device: str) -> BaseTask` to solve the following problem:
Build a task processor to manage the deployment pipeline. Args: model_cfg (str | mmengine.Config): Model config file. deploy_cfg (str | mmengine.Config): Deployment config file. device (str): A string specifying device type. Returns: BaseTask: A task processor.
Here is the function:
def build_task_processor(model_cfg: mmengine.Config,
deploy_cfg: mmengine.Config, device: str) -> BaseTask:
"""Build a task processor to manage the deployment pipeline.
Args:
model_cfg (str | mmengine.Config): Model config file.
deploy_cfg (str | mmengine.Config): Deployment config file.
device (str): A string specifying device type.
Returns:
BaseTask: A task processor.
"""
check_backend_device(deploy_cfg=deploy_cfg, device=device)
codebase_type = get_codebase(deploy_cfg, model_cfg=model_cfg)
custom_module_list = get_codebase_external_module(deploy_cfg)
import_codebase(codebase_type, custom_module_list)
codebase = get_codebase_class(codebase_type)
return codebase.build_task_processor(model_cfg, deploy_cfg, device) | Build a task processor to manage the deployment pipeline. Args: model_cfg (str | mmengine.Config): Model config file. deploy_cfg (str | mmengine.Config): Deployment config file. device (str): A string specifying device type. Returns: BaseTask: A task processor. |
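A short usage sketch; the config paths below are placeholders rather than real files:
import mmengine

model_cfg = mmengine.Config.fromfile('path/to/model_config.py')
deploy_cfg = mmengine.Config.fromfile('path/to/deploy_config.py')
task_processor = build_task_processor(model_cfg, deploy_cfg, device='cpu')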
188,553 | import logging
from typing import Any, Optional, Sequence
import mmengine
from mmdeploy.codebase import BaseTask, get_codebase_class, import_codebase
from mmdeploy.utils import (get_backend, get_codebase, get_task_type,
parse_device_id)
from mmdeploy.utils.config_utils import get_codebase_external_module
from ..core import PIPELINE_MANAGER
def import_codebase(codebase_type: Codebase, custom_module_list: List = []):
"""Import a codebase package in `mmdeploy.codebase`
The function will check if all dependent libraries are installed.
For example, to import `mmdeploy.codebase.mmdet`, `mmdet` must be
installed. To import `mmdeploy.codebase.mmocr`, `mmdet` and `mmocr`
must be installed.
Args:
codebase (Codebase): The codebase to import.
"""
import importlib
codebase_name = codebase_type.value
dependent_library = [codebase_name] + \
extra_dependent_library.get(codebase_type, [])
for lib in dependent_library + custom_module_list:
if not importlib.util.find_spec(lib):
raise ImportError(f'{lib} has not been installed. '
f'Import {lib} failed.')
if len(custom_module_list) > 0:
for custom_module in custom_module_list:
importlib.import_module(f'{custom_module}')
codebase = get_codebase_class(codebase_type)
codebase.register_all_modules()
def get_codebase_external_module(
deploy_cfg: Union[str, mmengine.Config]) -> List:
return get_codebase_config(deploy_cfg).get('module', [])
The provided code snippet includes necessary dependencies for implementing the `get_predefined_partition_cfg` function. Write a Python function `def get_predefined_partition_cfg(deploy_cfg: mmengine.Config, partition_type: str)` to solve the following problem:
Get the predefined partition config. Notes: Currently only support mmdet codebase. Args: deploy_cfg (mmengine.Config): use deploy config to get the codebase and task type. partition_type (str): A string specifying partition type. Returns: dict: A dictionary of partition config.
Here is the function:
def get_predefined_partition_cfg(deploy_cfg: mmengine.Config,
partition_type: str):
"""Get the predefined partition config.
Notes:
Currently only the mmdet codebase is supported.
Args:
deploy_cfg (mmengine.Config): use deploy config to get the codebase and
task type.
partition_type (str): A string specifying partition type.
Returns:
dict: A dictionary of partition config.
"""
codebase_type = get_codebase(deploy_cfg)
custom_module_list = get_codebase_external_module(deploy_cfg)
import_codebase(codebase_type, custom_module_list)
task = get_task_type(deploy_cfg)
codebase = get_codebase_class(codebase_type)
task_processor_class = codebase.get_task_class(task)
return task_processor_class.get_partition_cfg(partition_type) | Get the predefined partition config. Notes: Currently only support mmdet codebase. Args: deploy_cfg (mmengine.Config): use deploy config to get the codebase and task type. partition_type (str): A string specifying partition type. Returns: dict: A dictionary of partition config. |
188,554 | import logging
from typing import Any, Optional, Sequence
import mmengine
from mmdeploy.codebase import BaseTask, get_codebase_class, import_codebase
from mmdeploy.utils import (get_backend, get_codebase, get_task_type,
parse_device_id)
from mmdeploy.utils.config_utils import get_codebase_external_module
from ..core import PIPELINE_MANAGER
import logging
The provided code snippet includes necessary dependencies for implementing the `to_backend` function. Write a Python function `def to_backend(backend_name: str, ir_files: Sequence[str], work_dir: str, deploy_cfg: Optional[Any] = None, log_level: int = logging.INFO, device: str = 'cpu', **kwargs) -> Sequence[str]` to solve the following problem:
Convert intermediate representation to given backend. Args: backend_name (str): The name of the backend. ir_files (Sequence[str]): The intermediate representation files. work_dir (str): The work directory, backend files and logs should be save in this directory. deploy_cfg (Any): The deploy config. log_level (int, optional): The log level. Defaults to logging.INFO. device (str, optional): The device type. Defaults to 'cpu'. Returns: Sequence[str]: Backend files.
Here is the function:
def to_backend(backend_name: str,
ir_files: Sequence[str],
work_dir: str,
deploy_cfg: Optional[Any] = None,
log_level: int = logging.INFO,
device: str = 'cpu',
**kwargs) -> Sequence[str]:
"""Convert intermediate representation to given backend.
Args:
backend_name (str): The name of the backend.
ir_files (Sequence[str]): The intermediate representation files.
work_dir (str): The work directory, backend files and logs should
be saved in this directory.
deploy_cfg (Any): The deploy config.
log_level (int, optional): The log level. Defaults to logging.INFO.
device (str, optional): The device type. Defaults to 'cpu'.
Returns:
Sequence[str]: Backend files.
"""
from mmdeploy.backend.base import get_backend_manager
backend_mgr = get_backend_manager(backend_name)
return backend_mgr.to_backend(
ir_files=ir_files,
work_dir=work_dir,
deploy_cfg=deploy_cfg,
log_level=log_level,
device=device,
**kwargs) | Convert intermediate representation to given backend. Args: backend_name (str): The name of the backend. ir_files (Sequence[str]): The intermediate representation files. work_dir (str): The work directory, backend files and logs should be save in this directory. deploy_cfg (Any): The deploy config. log_level (int, optional): The log level. Defaults to logging.INFO. device (str, optional): The device type. Defaults to 'cpu'. Returns: Sequence[str]: Backend files. |
188,555 | import re
import onnx
from packaging import version
The provided code snippet includes necessary dependencies for implementing the `parse_extractor_io_string` function. Write a Python function `def parse_extractor_io_string(io_str) -> tuple` to solve the following problem:
Parse IO string for extractor.
Here is the function:
def parse_extractor_io_string(io_str) -> tuple:
"""Parse IO string for extractor."""
name, io_type = io_str.split(':')
assert io_type in ['input', 'output']
func_id = 0
search_result = re.search(r'^(.+)\[([0-9]+)\]$', name)
if search_result is not None:
name = search_result.group(1)
func_id = int(search_result.group(2))
return name, func_id, io_type | Parse IO string for extractor. |
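For reference, a few io strings in the marker format used elsewhere in this document parse as follows:
assert parse_extractor_io_string('detector:input') == ('detector', 0, 'input')
assert parse_extractor_io_string('multiclass_nms[0]:input') == ('multiclass_nms', 0, 'input')
assert parse_extractor_io_string('roi_head[1]:output') == ('roi_head', 1, 'output')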
188,556 | import re
import onnx
from packaging import version
def _dfs_search_reachable_nodes_fast(self, node_output_name, graph_input_nodes,
reachable_nodes):
"""Using DFS to search reachable nodes."""
outputs = {}
for index, node in enumerate(self.graph.node):
for name in node.output:
if name not in outputs:
outputs[name] = set()
outputs[name].add(index)
def impl(node_output_name, graph_input_nodes, reachable_nodes):
if node_output_name in graph_input_nodes:
return
if node_output_name not in outputs:
return
for index in outputs[node_output_name]:
node = self.graph.node[index]
if node in reachable_nodes:
continue
reachable_nodes.append(node)
for name in node.input:
impl(name, graph_input_nodes, reachable_nodes)
impl(node_output_name, graph_input_nodes, reachable_nodes)
The provided code snippet includes necessary dependencies for implementing the `create_extractor` function. Write a Python function `def create_extractor(model: onnx.ModelProto) -> onnx.utils.Extractor` to solve the following problem:
Create Extractor for ONNX. Args: model (onnx.ModelProto): An input onnx model. Returns: onnx.utils.Extractor: Extractor for the onnx.
Here is the function:
def create_extractor(model: onnx.ModelProto) -> onnx.utils.Extractor:
"""Create Extractor for ONNX.
Args:
model (onnx.ModelProto): An input onnx model.
Returns:
onnx.utils.Extractor: Extractor for the onnx.
"""
assert version.parse(onnx.__version__) >= version.parse('1.8.0')
# patch extractor
onnx.utils.Extractor._dfs_search_reachable_nodes = \
_dfs_search_reachable_nodes_fast
extractor = onnx.utils.Extractor(model)
return extractor | Create Extractor for ONNX. Args: model (onnx.ModelProto): An input onnx model. Returns: onnx.utils.Extractor: Extractor for the onnx. |
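Typical usage pairs the extractor with tensor names from the loaded graph; the file and tensor names below are placeholders:
model = onnx.load('end2end.onnx')
extractor = create_extractor(model)
# onnx.utils.Extractor.extract_model takes lists of input/output tensor names.
partition = extractor.extract_model(['input'], ['dets', 'labels'])
onnx.save(partition, 'partition.onnx')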
188,557 | import inspect
from typing import Any, Callable, Dict, Optional, Sequence
import torch
from mmdeploy.core.rewriters import FUNCTION_REWRITER
from mmdeploy.utils import IR, cfg_apply_marks, get_partition_config
MARK_FUNCTION_COUNT = dict()
The provided code snippet includes necessary dependencies for implementing the `reset_mark_function_count` function. Write a Python function `def reset_mark_function_count()` to solve the following problem:
Reset counter of mark function.
Here is the function:
def reset_mark_function_count():
"""Reset counter of mark function."""
for k in MARK_FUNCTION_COUNT:
MARK_FUNCTION_COUNT[k] = 0 | Reset counter of mark function. |
188,558 | import inspect
from typing import Any, Callable, Dict, Optional, Sequence
import torch
from mmdeploy.core.rewriters import FUNCTION_REWRITER
from mmdeploy.utils import IR, cfg_apply_marks, get_partition_config
The provided code snippet includes necessary dependencies for implementing the `mark_symbolic` function. Write a Python function `def mark_symbolic(g, x, *args)` to solve the following problem:
Rewrite symbolic of mark op.
Here is the function:
def mark_symbolic(g, x, *args):
"""Rewrite symbolic of mark op."""
ctx = FUNCTION_REWRITER.get_context()
if cfg_apply_marks(ctx.cfg):
return ctx.origin_func(g, x, *args)
return x | Rewrite symbolic of mark op. |
188,559 | import inspect
from typing import Any, Callable, Dict, Optional, Sequence
import torch
from mmdeploy.core.rewriters import FUNCTION_REWRITER
from mmdeploy.utils import IR, cfg_apply_marks, get_partition_config
The provided code snippet includes necessary dependencies for implementing the `forward_of_mark` function. Write a Python function `def forward_of_mark(ctx, x, dtype, shape, func, func_id, type, name, id, attrs) -> torch.Tensor` to solve the following problem:
Rewrite forward of mark op.
Here is the function:
def forward_of_mark(ctx, x, dtype, shape, func, func_id, type, name, id,
attrs) -> torch.Tensor:
"""Rewrite forward of mark op."""
rewriter = FUNCTION_REWRITER.get_context()
deploy_cfg = rewriter.cfg
# save calib data
apply_marks = cfg_apply_marks(deploy_cfg)
create_calib = getattr(rewriter, 'create_calib', False)
if apply_marks and create_calib:
partition_params = get_partition_config(deploy_cfg)
assert partition_params is not None, 'No partition config.'
partition_type = partition_params['type']
from mmdeploy.apis import get_predefined_partition_cfg
partition_cfgs = get_predefined_partition_cfg(deploy_cfg,
partition_type)
assert hasattr(rewriter, 'calib_file')
for partition_id, partition_cfg in enumerate(partition_cfgs):
start = partition_cfg['start']
if (f'{func}:{type}' not in start) and (f'{func}[{func_id}]:{type}'
not in start):
continue
input_name = name
dynamic_axes = partition_cfg.get('dynamic_axes', None)
if dynamic_axes is not None:
input_name = name
calib_file = rewriter.calib_file
calib_data_group = calib_file['calib_data']
partition_name = f'partition{partition_id}'
if partition_name not in calib_data_group:
calib_data_group.create_group(partition_name)
partition_group = calib_data_group[partition_name]
if input_name not in partition_group:
partition_group.create_group(input_name)
input_data_group = partition_group[input_name]
data_id = rewriter.data_id
x_np = x.detach().cpu().numpy()
input_data_group.create_dataset(
str(data_id),
shape=x_np.shape,
compression='gzip',
compression_opts=4,
data=x_np)
return rewriter.origin_func(ctx, x, dtype, shape, func, func_id, type,
name, id, attrs) | Rewrite forward of mark op. |
188,560 | import inspect
from typing import Any, Callable, Dict, Optional, Sequence
import torch
from mmdeploy.core.rewriters import FUNCTION_REWRITER
from mmdeploy.utils import IR, cfg_apply_marks, get_partition_config
The provided code snippet includes necessary dependencies for implementing the `remove_mark__torchscript` function. Write a Python function `def remove_mark__torchscript(xs: Any, *args, **kwargs)` to solve the following problem:
Disable all marks for TorchScript backend. As the Node `mark` is not able to be traced, we just return original input for the function `mark_tensors`. Args: xs (Any): Input structure which contains tensor.
Here is the function:
def remove_mark__torchscript(xs: Any, *args, **kwargs):
"""Disable all marks for TorchScript backend.
As the Node `mark` is not able to be traced, we just return original input
for the function `mark_tensors`.
Args:
xs (Any): Input structure which contains tensor.
"""
return xs | Disable all marks for TorchScript backend. As the Node `mark` is not able to be traced, we just return original input for the function `mark_tensors`. Args: xs (Any): Input structure which contains tensor. |
188,561 | import inspect
from typing import Any, Callable, Dict, Optional, Sequence
import torch
from mmdeploy.core.rewriters import FUNCTION_REWRITER
from mmdeploy.utils import IR, cfg_apply_marks, get_partition_config
MARK_FUNCTION_COUNT = dict()
def mark_tensors(xs: Any, func: str, func_id: int, io_type: str, ctx: Any,
attrs: Dict, is_inspecting: bool, level: int) -> tuple:
"""Add mark node recursively.
Args:
xs (Any): Input structure which contains tensor.
func (str): Function name of the function which xs comes from.
func_id (int): Function index of `func` in the model.
io_type (str): The io type of xs, `input` or `output`.
ctx (Any): The context instance.
attrs (Dict): The extra attributes provided by mark decorator.
is_inspecting (bool): The names of xs are inspected or not.
level (int): The recursive level.
Returns:
Any: The same structure as xs, all tensor has been replaced with Mark.
"""
visit = set()
index = 0
def impl(ys, prefix, level):
nonlocal index
old_index = index
ret = ys
prefix = () if level == 0 else prefix
if isinstance(ys, torch.Tensor):
if ys not in visit:
visit.add(ys)
root = ctx.names[ctx.index]
name = '.'.join(str(x) for x in (root, *prefix))
ys_shape = tuple(int(s) for s in ys.shape)
ret = Mark.apply(ys, ys.dtype, ys_shape, func, func_id,
io_type, name, index, attrs)
index += 1
elif isinstance(ys, list):
ret = [
impl(y, prefix + (i, ), level + 1) for i, y in enumerate(ys)
]
elif isinstance(ys, tuple):
ret = tuple(
impl(y, prefix + (i, ), level + 1) for i, y in enumerate(ys))
elif isinstance(ys, dict):
ret = {
k: impl(v, prefix + (k, ), level + 1)
for k, v in ys.items()
}
if level == 0 and (is_inspecting or old_index != index):
ctx.index += 1
return ret
return impl(xs, (), level)
# registered with: @FUNCTION_REWRITER.register_rewriter('mmdeploy.core.optimizers.function_marker.mark_tensors', ir=IR.TORCHSCRIPT)
The provided code snippet includes necessary dependencies for implementing the `mark` function. Write a Python function `def mark(func_name: Optional[str] = None, inputs: Optional[Sequence[str]] = None, outputs: Optional[Sequence[str]] = None, **attrs) -> Callable` to solve the following problem:
The decorator used to add mark node. Mark node can be used to support model partition. Args: func_name (str): The name of the function where marks come from. inputs (Sequence[str]): The input names of the marks. The final name \ might have suffix if inputs is list or dictionary. outputs (Sequence[str]): The output names of the marks. The final \ name might have suffix if outputs is list or dictionary. Returns: Callable: The process of mark decorator. Examples: >>> from mmdeploy.core import FUNCTION_REWRITER, mark >>> @FUNCTION_REWRITER.register_rewriter( >>> func_name='mmdet.models.roi_heads.ConvFCBBoxHead.forward') >>> def forward_of_bbox_head(self, x): >>> ctx = FUNCTION_REWRITER.get_context() >>> @mark( >>> 'bbox_head_forward', >>> inputs=['bbox_feats'], >>> outputs=['cls_score', 'bbox_pred']) >>> def _impl(): >>> return ctx.origin_func(self, x) >>> return _impl()
Here is the function:
def mark(func_name: Optional[str] = None,
inputs: Optional[Sequence[str]] = None,
outputs: Optional[Sequence[str]] = None,
**attrs) -> Callable:
"""The decorator used to add mark node.
Mark node can be used to support model partition.
Args:
func_name (str): The name of the function where marks come from.
inputs (Sequence[str]): The input names of the marks. The final name \
might have suffix if inputs is list or dictionary.
outputs (Sequence[str]): The output names of the marks. The final \
name might have suffix if outputs is list or dictionary.
Returns:
Callable: The process of mark decorator.
Examples:
>>> from mmdeploy.core import FUNCTION_REWRITER, mark
>>> @FUNCTION_REWRITER.register_rewriter(
>>> func_name='mmdet.models.roi_heads.ConvFCBBoxHead.forward')
>>> def forward_of_bbox_head(self, x):
>>> ctx = FUNCTION_REWRITER.get_context()
>>> @mark(
>>> 'bbox_head_forward',
>>> inputs=['bbox_feats'],
>>> outputs=['cls_score', 'bbox_pred'])
>>> def _impl():
>>> return ctx.origin_func(self, x)
>>> return _impl()
"""
MARK_FUNCTION_COUNT[func_name] = 0
class Context:
def __init__(self, names):
self.names = names
self.index = 0
def decorator(f):
func = func_name if func_name else f.__name__
is_inspect = False
if not inputs:
input_names = list(inspect.signature(f).parameters.keys())
is_inspect = True
else:
input_names = inputs
output_names = outputs if outputs else func
# args and retvals match corresponding names at level 0
args_level, rets_level = -1, -1
if isinstance(input_names, str):
input_names = (input_names, )
if isinstance(output_names, str):
output_names = (output_names, )
rets_level += 1
def g(*args, **kwargs):
func_id = MARK_FUNCTION_COUNT[func_name]
MARK_FUNCTION_COUNT[func_name] += 1
ctx = Context(input_names)
args = mark_tensors(args, func, func_id, 'input', ctx, attrs,
is_inspect, args_level)
rets = f(*args, **kwargs)
ctx = Context(output_names)
func_ret = mark_tensors(rets, func, func_id, 'output', ctx, attrs,
False, rets_level)
return func_ret
return g
return decorator | The decorator used to add mark node. Mark node can be used to support model partition. Args: func_name (str): The name of the function where marks come from. inputs (Sequence[str]): The input names of the marks. The final name \ might have suffix if inputs is list or dictionary. outputs (Sequence[str]): The output names of the marks. The final \ name might have suffix if outputs is list or dictionary. Returns: Callable: The process of mark decorator. Examples: >>> from mmdeploy.core import FUNCTION_REWRITER, mark >>> @FUNCTION_REWRITER.register_rewriter( >>> func_name='mmdet.models.roi_heads.ConvFCBBoxHead.forward') >>> def forward_of_bbox_head(self, x): >>> ctx = FUNCTION_REWRITER.get_context() >>> @mark( >>> 'bbox_head_forward', >>> inputs=['bbox_feats'], >>> outputs=['cls_score', 'bbox_pred']) >>> def _impl(): >>> return ctx.origin_func(self, x) >>> return _impl() |
188,562 | from typing import Callable, Dict, Iterable, Optional
import onnx
from onnx.helper import get_attribute_value
from mmdeploy.utils import get_root_logger
def attribute_to_dict(attr: onnx.AttributeProto) -> Dict:
"""Convert onnx op attribute to dict.
Args:
attr (onnx.AttributeProto): Input onnx op attribute.
Returns:
dict: A dict contains info from op attribute.
"""
ret = {}
for a in attr:
value = get_attribute_value(a)
if isinstance(value, bytes):
value = str(value, 'utf-8')
ret[a.name] = value
return ret
The provided code snippet includes necessary dependencies for implementing the `is_unused_mark` function. Write a Python function `def is_unused_mark(marks: Iterable[onnx.NodeProto]) -> Callable` to solve the following problem:
Check whether a mark is unused. Args: marks (Iterable[onnx.NodeProto]): A list of onnx NodeProto. Returns: Callable: The function to check if a mark node is in `marks`.
Here is the function:
def is_unused_mark(marks: Iterable[onnx.NodeProto]) -> Callable:
"""Check whether a mark is unused.
Args:
marks (Iterable[onnx.NodeProto]): A list of onnx NodeProto.
Returns:
Callable: The function to check if a mark node is in `marks`.
"""
def f(node):
if node.op_type == 'Mark':
attr = attribute_to_dict(node.attribute)
name = attr['func'] + ':' + attr['type']
if name not in marks:
return True
return False
return f | Check whether a mark is unused. Args: marks (Iterable[onnx.NodeProto]): A list of onnx NodeProto. Returns: Callable: The function to check if a mark node is in `marks`. |
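A small self-check with a hand-made Mark node; the attribute values are made up for illustration:
from onnx.helper import make_node

node = make_node('Mark', ['x'], ['y'], func='detector', type='input')
assert is_unused_mark(['detector:input'])(node) is False       # kept
assert is_unused_mark(['extract_feat:output'])(node) is True   # unused -> removable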
188,563 | from typing import Callable, Dict, Iterable, Optional
import onnx
from onnx.helper import get_attribute_value
from mmdeploy.utils import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `get_new_name` function. Write a Python function `def get_new_name(attrs: Dict[str, str], mark_name: str = '', name_map: Optional[Dict[str, str]] = None) -> str` to solve the following problem:
Get new name for a node. Args: attrs (Dict[str, str]): A dict contains attributes of an ONNX node. mark_name (str): The input mark op name. Default is ''. name_map (Dict[str, str]): A mapping of node names, defaults to `None`. Returns: str: The new node name.
Here is the function:
def get_new_name(attrs: Dict[str, str],
mark_name: str = '',
name_map: Optional[Dict[str, str]] = None) -> str:
"""Get new name for a node.
Args:
attrs (Dict[str, str]): A dict contains attributes of an ONNX node.
mark_name (str): The input mark op name. Default is ''.
name_map (Dict[str, str]): A mapping of node names, defaults to
`None`.
Returns:
str: The new node name.
"""
if 'name' in attrs:
new_name = attrs['name']
else:
new_name = '_'.join((attrs['func'], attrs['type'], str(attrs['id'])))
if name_map is not None:
if new_name in name_map:
return name_map[new_name]
if f'{mark_name}:{new_name}' in name_map:
return name_map[f'{mark_name}:{new_name}']
return new_name | Get new name for a node. Args: attrs (Dict[str, str]): A dict contains attributes of an ONNX node. mark_name (str): The input mark op name. Default is ''. name_map (Dict[str, str]): A mapping of node names, defaults to `None`. Returns: str: The new node name. |
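A few name-resolution examples; the attribute values are illustrative:
attrs = {'func': 'multiclass_nms', 'type': 'input', 'id': 0}
assert get_new_name(attrs) == 'multiclass_nms_input_0'
assert get_new_name({'name': 'dets', **attrs}) == 'dets'
assert get_new_name(attrs, name_map={'multiclass_nms_input_0': 'scores'}) == 'scores'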
188,564 | from typing import Callable, Dict, Iterable, Optional
import onnx
from onnx.helper import get_attribute_value
from mmdeploy.utils import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `rename_value` function. Write a Python function `def rename_value(model: onnx.ModelProto, old_name: str, new_name: str)` to solve the following problem:
Rename a node in an ONNX model. Args: model (onnx.ModelProto): Input onnx model. old_name (str): Original node name in the model. new_name (str): New node name in the model.
Here is the function:
def rename_value(model: onnx.ModelProto, old_name: str, new_name: str):
"""Rename a node in an ONNX model.
Args:
model (onnx.ModelProto): Input onnx model.
old_name (str): Original node name in the model.
new_name (str): New node name in the model.
"""
if old_name == new_name:
return
logger = get_root_logger()
logger.info(f'rename {old_name} -> {new_name}')
for n in model.graph.node:
for i, output in enumerate(n.output):
if output == old_name:
n.output[i] = new_name
for i, input in enumerate(n.input):
if input == old_name:
n.input[i] = new_name
for v in model.graph.value_info:
if v.name == old_name:
v.name = new_name
for i, input in enumerate(model.graph.input):
if input.name == old_name:
input.name = new_name
for i, output in enumerate(model.graph.output):
if output.name == old_name:
output.name = new_name | Rename a node in an ONNX model. Args: model (onnx.ModelProto): Input onnx model. old_name (str): Original node name in the model. new_name (str): New node name in the model. |
188,565 | from typing import Callable, Dict, Iterable, Optional
import onnx
from onnx.helper import get_attribute_value
from mmdeploy.utils import get_root_logger
def remove_nodes(model: onnx.ModelProto,
predicate: Callable) -> onnx.ModelProto:
"""Remove nodes from ONNX model.
Args:
model (onnx.ModelProto): Input onnx model.
predicate (Callable): A function to predicate a node.
Returns:
onnx.ModelProto: Modified onnx model.
"""
# ! this doesn't handle inputs/outputs
logger = get_root_logger()
while True:
connect = None
for i, node in enumerate(model.graph.node):
if predicate(node):
assert len(node.input) == 1
assert len(node.output) == 1
connect = (node.input[0], node.output[0])
logger.info(f'remove node {node.name}')
del model.graph.node[i]
break
if not connect:
break
src, dst = connect
for node in model.graph.node:
for i, input in enumerate(node.input):
if input == dst:
node.input[i] = src
return model
def is_identity(node: onnx.NodeProto) -> bool:
"""Check if an op is identity."""
return node.op_type == 'Identity'
The provided code snippet includes necessary dependencies for implementing the `remove_identity` function. Write a Python function `def remove_identity(model: onnx.ModelProto)` to solve the following problem:
Remove identity node from an ONNX model. Args: model (onnx.ModelProto): Input onnx model.
Here is the function:
def remove_identity(model: onnx.ModelProto):
"""Remove identity node from an ONNX model.
Args:
model (onnx.ModelProto): Input onnx model.
"""
graph = model.graph
def simplify_inputs():
connect = None
logger = get_root_logger()
for input in graph.input:
for i, node in enumerate(graph.node):
if node.op_type == 'Identity' and node.input[0] == input.name:
connect = (node.input[0], node.output[0])
logger.info(f'remove node {node.name}')
del graph.node[i]
break
if connect:
break
if not connect:
return False
src, dst = connect
for node in graph.node:
for i, input_name in enumerate(node.input):
if input_name == dst:
node.input[i] = src
# the input just changed won't be an output
return True
def simplify_outputs():
connect = None
logger = get_root_logger()
for output in graph.output:
for i, node in enumerate(graph.node):
if node.op_type == 'Identity' and \
node.output[0] == output.name:
connect = (node.input[0], node.output[0])
logger.info(f'remove node {node.name}')
del graph.node[i]
break
if connect:
break
if not connect:
return False
src, dst = connect
for node in graph.node:
for i, output_name in enumerate(node.output):
if output_name == src:
node.output[i] = dst
# the output just renamed may be someone's input
for i, input_name in enumerate(node.input):
if input_name == src:
node.input[i] = dst
return True
while simplify_inputs():
pass
while simplify_outputs():
pass
remove_nodes(model, is_identity) | Remove identity node from an ONNX model. Args: model (onnx.ModelProto): Input onnx model. |
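Typical usage on a saved model; the file names are placeholders:
model = onnx.load('end2end.onnx')
remove_identity(model)
onnx.save(model, 'end2end_no_identity.onnx')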
188,566 | from typing import Callable, Dict, Iterable, Optional
import onnx
from onnx.helper import get_attribute_value
from mmdeploy.utils import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `remove_imports` function. Write a Python function `def remove_imports(model: onnx.ModelProto)` to solve the following problem:
Remove useless imports from an ONNX model. The domain like `mmdeploy` might influence model conversion for some backends. Args: model (onnx.ModelProto): Input onnx model.
Here is the function:
def remove_imports(model: onnx.ModelProto):
"""Remove useless imports from an ONNX model.
The domain like `mmdeploy` might influence model conversion for
some backends.
Args:
model (onnx.ModelProto): Input onnx model.
"""
logger = get_root_logger()
dst_domain = ['']
for node in model.graph.node:
if hasattr(node, 'module') and (node.module not in dst_domain):
dst_domain.append(node.module)
src_domains = [oi.domain for oi in model.opset_import]
for i, src_domain in enumerate(src_domains):
if src_domain not in dst_domain:
logger.info(f'remove opset_import {src_domain}')
model.opset_import.pop(i) | Remove useless imports from an ONNX model. The domain like `mmdeploy` might influence model conversion for some backends. Args: model (onnx.ModelProto): Input onnx model. |
188,567 | from typing import Dict
import mmengine
import torch.nn as nn
from mmdeploy.utils.constants import IR, Backend
from .function_rewriter import FunctionRewriter
from .module_rewriter import ModuleRewriter
from .rewriter_utils import collect_env
from .symbolic_rewriter import SymbolicRewriter
MODULE_REWRITER = REWRITER_MANAGER.module_rewriter
class IR(AdvancedEnum):
"""Define intermediate representation enumerations."""
ONNX = 'onnx'
TORCHSCRIPT = 'torchscript'
DEFAULT = 'default'
class Backend(AdvancedEnum):
"""Define backend enumerations."""
PYTORCH = 'pytorch'
TENSORRT = 'tensorrt'
ONNXRUNTIME = 'onnxruntime'
PPLNN = 'pplnn'
NCNN = 'ncnn'
SNPE = 'snpe'
OPENVINO = 'openvino'
SDK = 'sdk'
TORCHSCRIPT = 'torchscript'
RKNN = 'rknn'
ASCEND = 'ascend'
COREML = 'coreml'
TVM = 'tvm'
VACC = 'vacc'
DEFAULT = 'default'
The provided code snippet includes necessary dependencies for implementing the `patch_model` function. Write a Python function `def patch_model(model: nn.Module, cfg: mmengine.Config, backend: str = Backend.DEFAULT.value, ir: IR = IR.DEFAULT, recursive: bool = True, **kwargs) -> nn.Module` to solve the following problem:
Patch the model, replace the modules that can be rewritten. Note that the original model will be modified permanently. Args: model (torch.nn.Module): The model to patch. cfg (Dict): Config dictionary of deployment. backend (str): The inference engine name. ir (IR): The intermeditate representation name. recursive (bool): The flag to enable recursive patching. Returns: nn.Module: THe patched model. Examples: >>> from mmdeploy.core import patch_model >>> from mmdeploy.utils import Backend, IR >>> deploy_cfg = {} >>> backend = Backend.DEFAULT.value >>> ir = IR.ONNX >>> patched_model = patch_model(model, deploy_cfg, backend, ir)
Here is the function:
def patch_model(model: nn.Module,
cfg: mmengine.Config,
backend: str = Backend.DEFAULT.value,
ir: IR = IR.DEFAULT,
recursive: bool = True,
**kwargs) -> nn.Module:
"""Patch the model, replace the modules that can be rewritten. Note that
the original model will be modified permanently.
Args:
model (torch.nn.Module): The model to patch.
cfg (Dict): Config dictionary of deployment.
backend (str): The inference engine name.
ir (IR): The intermediate representation name.
recursive (bool): The flag to enable recursive patching.
Returns:
nn.Module: The patched model.
Examples:
>>> from mmdeploy.core import patch_model
>>> from mmdeploy.utils import Backend, IR
>>> deploy_cfg = {}
>>> backend = Backend.DEFAULT.value
>>> ir = IR.ONNX
>>> patched_model = patch_model(model, deploy_cfg, backend, ir)
"""
return MODULE_REWRITER.patch_model(model, cfg, backend, ir, recursive,
**kwargs) | Patch the model, replace the modules that can be rewritten. Note that the original model will be modified permanently. Args: model (torch.nn.Module): The model to patch. cfg (Dict): Config dictionary of deployment. backend (str): The inference engine name. ir (IR): The intermeditate representation name. recursive (bool): The flag to enable recursive patching. Returns: nn.Module: THe patched model. Examples: >>> from mmdeploy.core import patch_model >>> from mmdeploy.utils import Backend, IR >>> deploy_cfg = {} >>> backend = Backend.DEFAULT.value >>> ir = IR.ONNX >>> patched_model = patch_model(model, deploy_cfg, backend, ir) |
188,568 | import functools
import inspect
import types
import warnings
from abc import ABCMeta, abstractmethod
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import mmdeploy
from mmdeploy.utils.constants import IR, Backend
The provided code snippet includes necessary dependencies for implementing the `eval_with_import` function. Write a Python function `def eval_with_import(path: str) -> Any` to solve the following problem:
Evaluate the string as Python script. Args: path (str): The path to evaluate. Returns: Any: The result of evaluation.
Here is the function:
def eval_with_import(path: str) -> Any:
"""Evaluate the string as Python script.
Args:
path (str): The path to evaluate.
Returns:
Any: The result of evaluation.
"""
split_path = path.split('.')
for i in range(len(split_path), 0, -1):
try:
exec('import {}'.format('.'.join(split_path[:i])))
break
except Exception:
continue
return eval(path) | Evaluate the string as Python script. Args: path (str): The path to evaluate. Returns: Any: The result of evaluation. |
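A quick example of resolving a dotted path to the object it names:
import torch

relu = eval_with_import('torch.nn.functional.relu')
assert relu is torch.nn.functional.relu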
188,569 | import functools
import inspect
import types
import warnings
from abc import ABCMeta, abstractmethod
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import mmdeploy
from mmdeploy.utils.constants import IR, Backend
The provided code snippet includes necessary dependencies for implementing the `import_function` function. Write a Python function `def import_function(path: str) -> Tuple[Callable, Optional[type]]` to solve the following problem:
Import and evaluate a function. If the function is defined in a class, evaluate the class additionally. Args: path (str): The path to evaluate. Returns: Callable: The function of evaluation. type: The class of evaluation if the function is defined in a class, or None.
Here is the function:
def import_function(path: str) -> Tuple[Callable, Optional[type]]:
"""Import and evaluate a function. If the function is defined in a class,
evaluate the class additionally.
Args:
path (str): The path to evaluate.
Returns:
Callable: The function of evaluation.
type: The class of evaluation if the function is defined in a class, or
None.
"""
split_path = path.split('.')
for i in range(len(split_path), 0, -1):
try:
exec('import {}'.format('.'.join(split_path[:i])))
break
except Exception:
continue
obj = eval(path)
# The path that might be a class
previous_obj = eval('.'.join(split_path[:-1]))
# Check if the path leads to a class
if inspect.isclass(previous_obj):
return obj, previous_obj
else:
return obj, None | Import and evaluate a function. If the function is defined in a class, evaluate the class additionally. Args: path (str): The path to evaluate. Returns: Callable: The function of evaluation. type: The class of evaluation if the function is defined in a class, or None. |
188,570 | import functools
import inspect
import types
import warnings
from abc import ABCMeta, abstractmethod
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import mmdeploy
from mmdeploy.utils.constants import IR, Backend
class IR(AdvancedEnum):
"""Define intermediate representation enumerations."""
ONNX = 'onnx'
TORCHSCRIPT = 'torchscript'
DEFAULT = 'default'
class Backend(AdvancedEnum):
"""Define backend enumerations."""
PYTORCH = 'pytorch'
TENSORRT = 'tensorrt'
ONNXRUNTIME = 'onnxruntime'
PPLNN = 'pplnn'
NCNN = 'ncnn'
SNPE = 'snpe'
OPENVINO = 'openvino'
SDK = 'sdk'
TORCHSCRIPT = 'torchscript'
RKNN = 'rknn'
ASCEND = 'ascend'
COREML = 'coreml'
TVM = 'tvm'
VACC = 'vacc'
DEFAULT = 'default'
The provided code snippet includes necessary dependencies for implementing the `collect_env` function. Write a Python function `def collect_env(backend: Backend, ir: IR, **kwargs) -> Dict` to solve the following problem:
Collect current environment information, including backend, ir, codebase version, etc. Rewriters will be checked according to env infos. Args: backend (Backend): Current backend. ir (IR): Current IR. Returns: Dict: Record the value of Backend and IR as well as the versions of libraries.
Here is the function:
def collect_env(backend: Backend, ir: IR, **kwargs) -> Dict:
"""Collect current environment information, including backend, ir, codebase
version, etc. Rewriters will be checked according to env infos.
Args:
backend (Backend): Current backend.
ir (IR): Current IR.
Returns:
Dict: Record the value of Backend and IR as well as the versions of
libraries.
"""
from mmdeploy.utils import get_backend_version, get_codebase_version
env = dict(backend=backend, ir=ir)
env['mmdeploy'] = mmdeploy.__version__
env.update(get_backend_version())
env.update(get_codebase_version())
env.update(kwargs)
return env | Collect current environment information, including backend, ir, codebase version, etc. Rewriters will be checked according to env infos. Args: backend (Backend): Current backend. ir (IR): Current IR. Returns: Dict: Record the value of Backend and IR as well as the versions of libraries. |
188,571 | import functools
import inspect
import types
import warnings
from abc import ABCMeta, abstractmethod
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import mmdeploy
from mmdeploy.utils.constants import IR, Backend
The provided code snippet includes necessary dependencies for implementing the `get_func_qualname` function. Write a Python function `def get_func_qualname(func: Callable) -> str` to solve the following problem:
get function name.
Here is the function:
def get_func_qualname(func: Callable) -> str:
"""get function name."""
assert isinstance(func, Callable), f'{func} is not a Callable object.'
_func_name = None
if hasattr(func, '__qualname__'):
_func_name = f'{func.__module__}.{func.__qualname__}'
elif hasattr(func, '__class__'):
_func_name = func.__class__
else:
_func_name = str(func)
return _func_name | get function name. |
188,572 | import functools
import inspect
import types
import warnings
from abc import ABCMeta, abstractmethod
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import mmdeploy
from mmdeploy.utils.constants import IR, Backend
The provided code snippet includes necessary dependencies for implementing the `get_frame_func` function. Write a Python function `def get_frame_func(top: int = 1) -> Callable` to solve the following problem:
get func of frame.
Here is the function:
def get_frame_func(top: int = 1) -> Callable:
"""get func of frame."""
frameinfo = inspect.stack()[top]
frame = frameinfo.frame
g_vars = frame.f_globals
func_name = frameinfo.function
assert func_name in g_vars, \
f'Can not find function: {func_name} in global.'
func = g_vars[func_name]
return func | get func of frame. |
188,573 | import functools
import inspect
import types
import warnings
from abc import ABCMeta, abstractmethod
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import mmdeploy
from mmdeploy.utils.constants import IR, Backend
The provided code snippet includes necessary dependencies for implementing the `get_frame_qualname` function. Write a Python function `def get_frame_qualname(top: int = 1) -> str` to solve the following problem:
get frame name.
Here is the function:
def get_frame_qualname(top: int = 1) -> str:
"""get frame name."""
frameinfo = inspect.stack()[top]
frame = frameinfo.frame
g_vars = frame.f_globals
func_name = frameinfo.function
assert func_name in g_vars, \
f'Can not find function: {func_name} in global.'
func = g_vars[func_name]
module_name = inspect.getmodule(func).__name__
return f'{module_name}.{func_name}' | get frame name. |
188,574 | import functools
import inspect
import types
import warnings
from abc import ABCMeta, abstractmethod
from functools import wraps
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import mmdeploy
from mmdeploy.utils.constants import IR, Backend
The provided code snippet includes necessary dependencies for implementing the `copy_function` function. Write a Python function `def copy_function(f: types.FunctionType)` to solve the following problem:
Copy the function.
Here is the function:
def copy_function(f: types.FunctionType):
"""Copy the function."""
# copy the global so we can get different func for different origin
glb = f.__globals__.copy()
name = f.__name__
g = types.FunctionType(
f.__code__,
glb,
name=name,
argdefs=f.__defaults__,
closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
glb[name] = g
return g | Copy the function. |
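A small check that the copy behaves like the original while being a separate function object:
def greet(name='world'):
    return f'hello {name}'

greet_copy = copy_function(greet)
assert greet_copy is not greet
assert greet_copy() == greet() == 'hello world'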
188,575 | import types
from collections import defaultdict
from typing import (Any, Callable, Dict, List, MutableSequence, Optional,
Tuple, Union)
from mmdeploy.utils import IR, Backend, get_root_logger
from .rewriter_utils import (Checker, ContextCaller, RewriterRegistry,
copy_function, get_frame_func, get_func_qualname,
import_function)
def _replace_all_obj(obj: Any,
new_obj: Any,
ignore_refs: Tuple[Any] = tuple(),
ignore_keys: Tuple[str] = tuple()):
"""Replace all object reference with new_object.
Args:
obj (Any): The object to be replaced.
new_obj (Any): The object to replace obj.
ignore_refs (Tuple[Any]): These refs will be ignored.
ignore_keys (Tuple[str]): object with these keys will be ignored.
"""
import gc
refs = gc.get_referrers(obj)
obj_id = id(obj)
for ref in refs:
if ref in ignore_refs:
continue
elif isinstance(ref, MutableSequence):
for i, v in enumerate(ref):
if id(v) == obj_id:
ref[i] = new_obj
elif isinstance(ref, Dict):
for k, v in ref.items():
if id(v) == obj_id and k not in ignore_keys:
ref[k] = new_obj
else:
# TODO: check if we can replace tuple
pass
The provided code snippet includes necessary dependencies for implementing the `_set_func` function. Write a Python function `def _set_func(origin_func_path: str, rewrite_func: Callable, ignore_refs: Tuple[Any] = tuple(), ignore_keys: Tuple[str] = ('origin_func', ))` to solve the following problem:
Rewrite a function by executing a python statement. Args: origin_func_path (str): The path to origin function. rewrite_func (Callable): The new function instance. ignore_refs (Tuple[Any]): These refs will be ignored. ignore_keys (Tuple[str]): object with these keys will be ignored.
Here is the function:
def _set_func(origin_func_path: str,
rewrite_func: Callable,
ignore_refs: Tuple[Any] = tuple(),
ignore_keys: Tuple[str] = ('origin_func', )):
"""Rewrite a function by executing a python statement.
Args:
origin_func_path (str): The path to origin function.
rewrite_func (Callable): The new function instance.
ignore_refs (Tuple[Any]): These refs will be ignored.
ignore_keys (Tuple[str]): object with these keys will be ignored.
"""
# Import necessary module
split_path = origin_func_path.split('.')
for i in range(len(split_path), 0, -1):
try:
exec('import {}'.format('.'.join(split_path[:i])))
break
except Exception:
continue
origin_func = eval(origin_func_path)
method_class = False
if len(split_path) > 1:
module_or_class = eval('.'.join(split_path[:-1]))
if isinstance(module_or_class, type):
method_class = True
# Assign function
if not method_class:
_replace_all_obj(
origin_func,
rewrite_func,
ignore_refs=ignore_refs,
ignore_keys=ignore_keys)
exec(f'{origin_func_path} = rewrite_func') | Rewrite a function by executing a python statement. Args: origin_func_path (str): The path to origin function. rewrite_func (Callable): The new function instance. ignore_refs (Tuple[Any]): These refs will be ignored. ignore_keys (Tuple[str]): object with these keys will be ignored. |
188,576 | import types
from collections import defaultdict
from typing import (Any, Callable, Dict, List, MutableSequence, Optional,
Tuple, Union)
from mmdeploy.utils import IR, Backend, get_root_logger
from .rewriter_utils import (Checker, ContextCaller, RewriterRegistry,
copy_function, get_frame_func, get_func_qualname,
import_function)
The provided code snippet includes necessary dependencies for implementing the `_del_func` function. Write a Python function `def _del_func(path: str)` to solve the following problem:
Delete a function that is denoted by a path. Args: path (str): The path to evaluate.
Here is the function:
def _del_func(path: str):
"""Delete a function that is denoted by a path.
Args:
path (str): The path to evaluate.
"""
split_path = path.split('.')
for i in range(len(split_path), 0, -1):
try:
exec('import {}'.format('.'.join(split_path[:i])))
exec(f'del {path}')
break
except Exception:
continue | Delete a function that is denoted by a path. Args: path (str): The path to evaluate. |
188,577 | import types
from collections import defaultdict
from typing import (Any, Callable, Dict, List, MutableSequence, Optional,
Tuple, Union)
from mmdeploy.utils import IR, Backend, get_root_logger
from .rewriter_utils import (Checker, ContextCaller, RewriterRegistry,
copy_function, get_frame_func, get_func_qualname,
import_function)
The provided code snippet includes necessary dependencies for implementing the `_fx_wrap_copied_fn` function. Write a Python function `def _fx_wrap_copied_fn(func: types.FunctionType, copied_func: types.FunctionType)` to solve the following problem:
If a function is wrapped by torch.fx.wrap, its copy also needs to be wrapped by torch.fx.wrap.
Here is the function:
def _fx_wrap_copied_fn(func: types.FunctionType,
copied_func: types.FunctionType):
"""If a function is wrapped by torch.fx.wrap, its copy also needs to be
wrapped by torch.fx.wrap."""
if not hasattr(func, '__globals__'):
return
wrapped_fns_globals = [item[0] for item in _wrapped_fns_to_patch]
wrapped_fns_names = [item[1] for item in _wrapped_fns_to_patch]
# check if wrapped by torch.fx.wrap
if func.__globals__ in wrapped_fns_globals:
idx = wrapped_fns_globals.index(func.__globals__)
fn_name = wrapped_fns_names[idx]
# a hacky way to wrap the func in copied func
_wrapped_fns_to_patch.append((copied_func.__globals__, fn_name)) | If a function is wrapped by torch.fx.wrap, its copy also needs to be wrapped by torch.fx.wrap. |
188,578 | from torch.onnx.symbolic_helper import parse_args
from mmdeploy.core import SYMBOLIC_REWRITER
from mmdeploy.utils import Backend, get_backend
def grid_sampler(g,
input,
grid,
interpolation_mode,
padding_mode,
align_corners=False):
"""Symbolic function for `grid_sampler`.
PyTorch does not support export grid_sampler to ONNX by default. We add the
support here. `grid_sampler` will be exported as ONNX node
'mmdeploy::grid_sampler'
"""
return g.op(
'mmdeploy::grid_sampler',
input,
grid,
interpolation_mode_i=interpolation_mode,
padding_mode_i=padding_mode,
align_corners_i=align_corners)
def grid_sampler_ppl(g,
input,
grid,
interpolation_mode,
padding_mode,
align_corners=False):
"""Symbolic function for `grid_sampler`.
PyTorch does not support export grid_sampler to ONNX by default. We add the
support here. `grid_sampler` will be exported as ONNX node
'mmdeploy::grid_sampler'
"""
return g.op(
'mmcv::grid_sampler',
input,
grid,
interpolation_mode_i=interpolation_mode,
padding_mode_i=padding_mode,
align_corners_i=align_corners)
The provided code snippet includes necessary dependencies for implementing the `grid_sampler__default` function. Write a Python function `def grid_sampler__default(*args)` to solve the following problem:
Register default symbolic function for `grid_sampler`. Add support to grid_sample to ONNX.
Here is the function:
def grid_sampler__default(*args):
"""Register default symbolic function for `grid_sampler`.
Add support to grid_sample to ONNX.
"""
ctx = SYMBOLIC_REWRITER.get_context()
backend = get_backend(ctx.cfg)
if backend == Backend.PPLNN:
return grid_sampler_ppl(*args)
else:
return grid_sampler(*args) | Register default symbolic function for `grid_sampler`. Add support to grid_sample to ONNX. |
188,579 | from torch.onnx import symbolic_helper
from mmdeploy.core import SYMBOLIC_REWRITER
from mmdeploy.utils import Backend
def gelu__ncnn_pt111(g, self):
"""gelu for torch<=1.12."""
return g.op('mmdeploy::Gelu', self)
@SYMBOLIC_REWRITER.register_symbolic(
    'gelu', is_pytorch=True, backend=Backend.NCNN.value)
The provided code snippet includes necessary dependencies for implementing the `gelu__ncnn` function. Write a Python function `def gelu__ncnn(g, self, approximate: str = 'none')` to solve the following problem:
Support export GELU with ncnn backend.
Here is the function:
def gelu__ncnn(g, self, approximate: str = 'none'):
"""Support export GELU with ncnn backend."""
return gelu__ncnn_pt111(g, self) | Support export GELU with ncnn backend. |
188,580 |
The provided code snippet includes necessary dependencies for implementing the `hardsigmoid__default` function. Write a Python function `def hardsigmoid__default(g, self)` to solve the following problem:
Support exporting hardsigmoid. This rewrite enables exporting hardsigmoid with torch<=1.8.2.
Here is the function:
def hardsigmoid__default(g, self):
"""Support export hardsigmoid This rewrite enable export hardsigmoid in
torch<=1.8.2."""
    return g.op('HardSigmoid', self, alpha_f=1 / 6) | Support exporting hardsigmoid. This rewrite enables exporting hardsigmoid with torch<=1.8.2. |
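A minimal eager-mode sketch (illustrative values only) of why alpha_f=1/6 is the right attribute here: ONNX HardSigmoid computes clamp(alpha * x + beta, 0, 1) with a default beta of 0.5, which coincides with torch.nn.functional.hardsigmoid.

import torch
import torch.nn.functional as F

x = torch.linspace(-6, 6, steps=25)
# ONNX HardSigmoid(alpha=1/6, beta=0.5) == clamp(x / 6 + 0.5, 0, 1)
onnx_like = torch.clamp(x / 6 + 0.5, min=0.0, max=1.0)
assert torch.allclose(onnx_like, F.hardsigmoid(x))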
188,581 | import sys
from torch.onnx.symbolic_helper import _slice_helper, parse_args
from mmdeploy.core import SYMBOLIC_REWRITER
def roll(g, self, shifts, dims):
"""Symbolic function for `roll`."""
assert len(shifts) == len(dims)
result = self
for i in range(len(shifts)):
shapes = []
shape = _slice_helper(
g, result, axes=[dims[i]], starts=[-shifts[i]], ends=[sys.maxsize])
shapes.append(shape)
shape = _slice_helper(
g, result, axes=[dims[i]], starts=[0], ends=[-shifts[i]])
shapes.append(shape)
result = g.op('Concat', *shapes, axis_i=dims[i])
return result
The provided code snippet includes necessary dependencies for implementing the `roll_default` function. Write a Python function `def roll_default(g, self, shifts, dims)` to solve the following problem:
Support export roll to ONNX with PyTorch version 1.10-.
Here is the function:
def roll_default(g, self, shifts, dims):
"""Support export roll to ONNX with PyTorch version 1.10-."""
return roll(g, self, shifts, dims) | Support export roll to ONNX with PyTorch version 1.10-. |
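The slice-and-concat decomposition used by the symbolic function can be checked against torch.roll in eager mode; the sketch below (illustrative shapes, a single positive shift) uses torch.narrow in place of the ONNX slice helper.

import torch

def roll_by_concat(x, shift, dim):
    # take the last `shift` elements and move them to the front,
    # mirroring the two _slice_helper calls in the symbolic function
    tail = x.narrow(dim, x.size(dim) - shift, shift)
    head = x.narrow(dim, 0, x.size(dim) - shift)
    return torch.cat([tail, head], dim=dim)

x = torch.arange(12).reshape(3, 4)
assert torch.equal(roll_by_concat(x, 1, dim=1), torch.roll(x, shifts=1, dims=1))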
188,582 | from torch.onnx.symbolic_helper import parse_args
from mmdeploy.core import SYMBOLIC_REWRITER
from mmdeploy.utils import Backend
The provided code snippet includes necessary dependencies for implementing the `layer_norm__default` function. Write a Python function `def layer_norm__default(g, input, normalized_shape, weight, bias, eps, cudnn_enable)` to solve the following problem:
Symbolic function for `layer_norm` Layer norm with torch<=1.12 might lead to wrong output shapes. Add keepdims=1 to each ReduceMean node to correct the shape.
Here is the function:
def layer_norm__default(g, input, normalized_shape, weight, bias, eps,
cudnn_enable):
"""Symbolic function for `layer_norm`
Layer norm with torch<=1.12 might lead to wrong output shapes. Add
keepdims=1 to each ReduceMean node to correct the shape.
"""
import torch.onnx.symbolic_helper as sym_help
from torch.onnx.symbolic_opset9 import add, mul, pow, sqrt, sub
axes = [-i for i in range(len(normalized_shape), 0, -1)]
two_cst = sym_help._generate_wrapped_number(g, 2.)
eps_cst = sym_help._generate_wrapped_number(g, eps)
mean = g.op('ReduceMean', input, axes_i=axes, keepdims_i=1)
numerator = sub(g, input, mean)
# variance = e((x - e(x))^2), and (x - e(x)) is the numerator in the
# layer_norm formula
variance = g.op(
'ReduceMean', pow(g, numerator, two_cst), axes_i=axes, keepdims_i=1)
denominator = sqrt(g, add(g, variance, eps_cst))
layer_norm = g.op('Div', numerator, denominator)
if not (weight is None or sym_help._is_none(weight)):
layer_norm = mul(g, layer_norm, weight)
if not (bias is None or sym_help._is_none(bias)):
layer_norm = add(g, layer_norm, bias)
return layer_norm | Symbolic function for `layer_norm` Layer norm with torch<=1.12 might lead to wrong output shapes. Add keepdims=1 to each ReduceMean node to correct the shape. |
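The graph above mirrors the usual layer-norm arithmetic; a small eager-mode sketch (shapes and eps are illustrative assumptions) checking the same steps against torch.nn.functional.layer_norm:

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 8)
normalized_shape = (8,)
weight, bias, eps = torch.randn(8), torch.randn(8), 1e-5

# same steps as the symbolic graph: mean, centered input, variance,
# normalize, then the affine transform
axes = [-i for i in range(len(normalized_shape), 0, -1)]
mean = x.mean(dim=axes, keepdim=True)
var = ((x - mean) ** 2).mean(dim=axes, keepdim=True)
out = (x - mean) / torch.sqrt(var + eps) * weight + bias
assert torch.allclose(out, F.layer_norm(x, normalized_shape, weight, bias, eps), atol=1e-6)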
188,583 | from torch.onnx.symbolic_helper import parse_args
from mmdeploy.core import SYMBOLIC_REWRITER
from mmdeploy.utils import Backend
def _layer_norm_ncnn(g, input, normalized_shape, weight, bias, eps,
cudnn_enable):
"""Symbolic function for `layer_norm`.
PyTorch does not support export layer_norm to ONNX by default. We add the
support here. `layer_norm` will be exported as ONNX node
'mmdeploy::layer_norm'
"""
weight.setDebugName('layernorm_weight')
bias.setDebugName('layernorm_bias')
return g.op(
'mmdeploy::LayerNorm', input, weight, bias, affine_i=1, epsilon_f=eps)
@SYMBOLIC_REWRITER.register_symbolic(
    'layer_norm', is_pytorch=True, backend=Backend.NCNN.value)
The provided code snippet includes necessary dependencies for implementing the `layer_norm__ncnn` function. Write a Python function `def layer_norm__ncnn(*args)` to solve the following problem:
Register default symbolic function for `layer_norm`. Add support to layer_norm to ONNX.
Here is the function:
def layer_norm__ncnn(*args):
"""Register default symbolic function for `layer_norm`.
Add support to layer_norm to ONNX.
"""
return _layer_norm_ncnn(*args) | Register default symbolic function for `layer_norm`. Add support to layer_norm to ONNX. |
188,584 | import torch
import torch.onnx.symbolic_helper as sym_help
from mmdeploy.core import SYMBOLIC_REWRITER
from mmdeploy.utils import get_ir_config
The provided code snippet includes necessary dependencies for implementing the `squeeze__default` function. Write a Python function `def squeeze__default(g, self, dim=None)` to solve the following problem:
Register default symbolic function for `squeeze`. squeeze might be exported with an If node in ONNX, which is not supported in many backends.
Here is the function:
def squeeze__default(g, self, dim=None):
"""Register default symbolic function for `squeeze`.
    squeeze might be exported with an If node in ONNX, which is not supported
    in many backends.
"""
if dim is None:
dims = []
for i, size in enumerate(self.type().sizes()):
if size == 1:
dims.append(i)
else:
dims = [sym_help._get_const(dim, 'i', 'dim')]
ctx = SYMBOLIC_REWRITER.get_context('squeeze')
if get_ir_config(ctx.cfg).get('opset_version', 11) >= 13:
axes = g.op('Constant', value_t=torch.tensor(dims, dtype=torch.long))
return g.op('Squeeze', self, axes)
    return g.op('Squeeze', self, axes_i=dims) | Register default symbolic function for `squeeze`. squeeze might be exported with an If node in ONNX, which is not supported in many backends. |
188,585 | import torch
from torch.onnx.symbolic_helper import (_get_tensor_dim_size, _get_tensor_rank,
_unimplemented, _unsqueeze_helper,
parse_args)
from mmdeploy.core import SYMBOLIC_REWRITER
def instance_norm(g, input, num_groups, weight, bias, eps, cudnn_enabled):
"""Symbolic function for `instance_norm`."""
channel_size = _get_tensor_dim_size(input, 1)
if channel_size is not None:
assert channel_size % num_groups == 0
input_rank = _get_tensor_rank(input)
if input_rank is None:
return _unimplemented('group_norm', 'unknown input rank')
# 0 in the shape list keeps dimension value unchanged.
shape = [0, num_groups, -1]
input_reshaped = g.op('Reshape', input,
g.op('Constant', value_t=torch.LongTensor(shape)))
# C is always divisible by num_groups
# Due to shape difference. we need to apply weight and bias after
# instance norm computation and reshape
weight_ = g.op(
'Constant',
value_t=torch.tensor(
[1.] * num_groups).type('torch.' + input.type().scalarType() +
'Tensor'))
bias_ = g.op(
'Constant',
value_t=torch.tensor(
[0.] * num_groups).type('torch.' + input.type().scalarType() +
'Tensor'))
norm_reshaped = g.op(
'mmdeploy::TRTInstanceNormalization',
input_reshaped,
weight_,
bias_,
epsilon_f=eps)
norm = g.op('Reshape', norm_reshaped, g.op('Shape', input))
if weight is None or weight.node().mustBeNone():
weight_value = torch.tensor(
[1.]).type('torch.' + input.type().scalarType() + 'Tensor')
weight = g.op('Constant', value_t=weight_value)
if bias is None or bias.node().mustBeNone():
bias_value = torch.tensor(
[0.]).type('torch.' + input.type().scalarType() + 'Tensor')
bias = g.op('Constant', value_t=bias_value)
# Norm has shape [N, C, *] so we reshape weight and bias to [C, *]
axes = list(range(1, input_rank - 1))
from torch.onnx.symbolic_opset9 import add, mul
return add(g, mul(g, norm, _unsqueeze_helper(g, weight, axes)),
_unsqueeze_helper(g, bias, axes))
@SYMBOLIC_REWRITER.register_symbolic(
    'group_norm', backend='tensorrt', is_pytorch=True)
The provided code snippet includes necessary dependencies for implementing the `instance_norm__tensorrt` function. Write a Python function `def instance_norm__tensorrt(*args)` to solve the following problem:
Register symbolic function for TensorRT backend. Notes: Instance normalization is implemented in group norm in pytorch.
Here is the function:
def instance_norm__tensorrt(*args):
"""Register symbolic function for TensorRT backend.
Notes:
Instance normalization is implemented in group norm in pytorch.
"""
return instance_norm(*args) | Register symbolic function for TensorRT backend. Notes: Instance normalization is implemented in group norm in pytorch. |
188,586 | import warnings
import torch
import torch.onnx.symbolic_helper as sym_help
from torch.onnx.symbolic_helper import _unimplemented
from torch.onnx.symbolic_opset9 import unused
from mmdeploy.core import FUNCTION_REWRITER
warnings.warn(
'Exporting a model to ONNX with a batch_size other than 1, ' +
'with a variable length with ' + variant + ' can cause an error ' +
'when running the ONNX model with a different batch size. ' +
'Make sure to save the model with a batch size of 1, ' +
'or define the initial states (h0/c0) as inputs of the model. ')
onnxActivations = [
'Relu', 'Tanh', 'Sigmoid', 'Affine', 'LeakyRelu', 'ThresholdedRelu',
'ScaledTanh', 'HardSigmoid', 'Elu', 'Softsign', 'Softplus'
]
variantToOnnxActivationMap = dict(
zip([act_fun.lower() for act_fun in onnxActivations], onnxActivations))
weights_per_layer = 4 if has_biases else 2
assert len(all_weights) == num_layers * weights_per_layer * (1 +
bidirectional)
layer_weights = [
all_weights[i:i + weights_per_layer]
for i in range(0, len(all_weights), weights_per_layer)
]
w_hh = all_weights[1]
hidden_size = sym_help._get_tensor_dim_size(w_hh, 1)
if hidden_size is None:
return _unimplemented('RNN/GRU/LSTM', 'unknown hidden size')
unidirectional = not bidirectional
prev_output = input
h_outs = []
sequence_lens = unused(g) if batch_sizes is None else batch_sizes
def reform_weights(g, w, n, intervals):
slices = [
sym_help._slice_helper(
g, w, axes=[0], starts=[x * n], ends=[y * n])
for x, y in intervals
]
return g.op('Concat', *slices, axis_i=0)
def transform_weights_no_bias(layer_index):
weights = layer_weights[layer_index]
if variant == 'RNN':
weight_ih, weight_hh = weights
elif variant == 'GRU' or variant == 'LSTM':
weight_ih, weight_hh = [
reform_weights(g, w, hidden_size, reform_permutation)
for w in weights
]
return tuple(
sym_help._unsqueeze_helper(g, x, [0])
for x in (weight_ih, weight_hh))
def transform_weights(layer_index):
weights = layer_weights[layer_index]
if variant == 'RNN':
weight_ih, weight_hh, bias_ih, bias_hh = weights
elif variant == 'GRU' or variant == 'LSTM':
weight_ih, weight_hh, bias_ih, bias_hh = [
reform_weights(g, w, hidden_size, reform_permutation)
for w in weights
]
bias_concat = g.op('Concat', bias_ih, bias_hh, axis_i=0)
return tuple(
sym_help._unsqueeze_helper(g, x, [0])
for x in (weight_ih, weight_hh, bias_concat))
def retrieve_state(x, start, end):
return x if num_layers == 1 else sym_help._slice_helper(
g, x, axes=[0], starts=[start], ends=[end])
for i in range(num_layers):
if unidirectional:
if weights_per_layer == 4:
weight_ih, weight_hh, bias_concat = transform_weights(i)
else:
weight_ih, weight_hh = transform_weights_no_bias(i)
bias_concat = unused(g)
state_indices = i, i + 1
else:
if weights_per_layer == 4:
weight_ih_f, weight_hh_f, bias_f = transform_weights(2 * i)
weight_ih_b, weight_hh_b, bias_b = transform_weights(2 * i + 1)
bias_concat = g.op('Concat', bias_f, bias_b, axis_i=0)
else:
weight_ih_f, weight_hh_f = transform_weights_no_bias(2 * i)
weight_ih_b, weight_hh_b = transform_weights_no_bias(2 * i + 1)
bias_concat = unused(g)
weight_ih = g.op('Concat', weight_ih_f, weight_ih_b, axis_i=0)
weight_hh = g.op('Concat', weight_hh_f, weight_hh_b, axis_i=0)
state_indices = 2 * i, 2 * i + 2
inputs = [
prev_output, weight_ih, weight_hh, bias_concat, sequence_lens
]
inputs.append(retrieve_state(h0, *state_indices))
if variant == 'LSTM':
inputs.append(retrieve_state(c0, *state_indices))
extra_kwargs = {} if unidirectional else {
'direction_s': 'bidirectional'
}
if variant == 'RNN':
if bidirectional:
activation = [nonlinearity, nonlinearity]
else:
activation = [nonlinearity]
prev_output, h_out = g.op(
'RNN',
*inputs,
outputs=2,
hidden_size_i=hidden_size,
activations_s=activation,
**extra_kwargs)
elif variant == 'GRU':
prev_output, h_out = g.op(
'GRU',
*inputs,
outputs=2,
hidden_size_i=hidden_size,
linear_before_reset_i=1,
**extra_kwargs)
elif variant == 'LSTM':
# g.op will add some node to h0 and c0,
# which is not necessary for us
prev_output, h_out, c_out = g.op(
'ncnn::LSTM',
*inputs,
outputs=3,
hidden_size_i=hidden_size,
**extra_kwargs)
if bidirectional:
# The ONNX RNN/GRU/LSTM produce an output of dimensions
# seq_len, num_directions, batch, hidden_size
# We have to convert to match pytorch's expected
# seq_len, batch, num_directions * hidden_size
# by first moving num_directions before hidden_size with
# Transpose, and then combining it with hidden_size
# with Reshape.
prev_output = g.op('Transpose', prev_output, perm_i=[0, 2, 1, 3])
prev_output = g.op(
'Reshape', prev_output,
g.op('Constant', value_t=torch.LongTensor([0, 0, -1])))
else:
prev_output = sym_help._squeeze_helper(g, prev_output, [1])
h_outs.append(h_out)
if variant == 'LSTM':
c_outs.append(c_out)
h_outs = h_out if num_layers == 1 else g.op('Concat', *h_outs, axis_i=0)
The provided code snippet includes necessary dependencies for implementing the `generic_rnn__ncnn` function. Write a Python function `def generic_rnn__ncnn(g, variant, input, initial_states, all_weights, has_biases, num_layers, dropout, train, bidirectional, batch_first=None, batch_sizes=None)` to solve the following problem:
rewrite of _generic_rnn for ncnn. `g.op` will add some nodes for h0 and c0 in LSTM. which is not supported in ncnn. So we add a custom domain to avoid it.
Here is the function:
def generic_rnn__ncnn(g,
variant,
input,
initial_states,
all_weights,
has_biases,
num_layers,
dropout,
train,
bidirectional,
batch_first=None,
batch_sizes=None):
"""rewrite of _generic_rnn for ncnn.
`g.op` will add some nodes for h0 and c0 in LSTM. which is not supported in
ncnn. So we add a custom domain to avoid it.
"""
warnings.warn(
'Exporting a model to ONNX with a batch_size other than 1, ' +
'with a variable length with ' + variant + ' can cause an error ' +
'when running the ONNX model with a different batch size. ' +
'Make sure to save the model with a batch size of 1, ' +
'or define the initial states (h0/c0) as inputs of the model. ')
onnxActivations = [
'Relu', 'Tanh', 'Sigmoid', 'Affine', 'LeakyRelu', 'ThresholdedRelu',
'ScaledTanh', 'HardSigmoid', 'Elu', 'Softsign', 'Softplus'
]
variantToOnnxActivationMap = dict(
zip([act_fun.lower() for act_fun in onnxActivations], onnxActivations))
weights_per_layer = 4 if has_biases else 2
# this means that projections are used inside LSTM,
# so need to tell user that it's not supported
if variant == 'LSTM' and len(
all_weights) != num_layers * weights_per_layer * (1 +
bidirectional):
return _unimplemented('LSTM', 'LSTMs with projections')
assert len(all_weights) == num_layers * weights_per_layer * (1 +
bidirectional)
layer_weights = [
all_weights[i:i + weights_per_layer]
for i in range(0, len(all_weights), weights_per_layer)
]
if batch_first:
# batch, seq, feat -> seq, batch, feat
input = g.op('Transpose', input, perm_i=[1, 0, 2])
if dropout and train:
return _unimplemented('RNN/GRU/LSTM', 'dropout in training mode')
if variant.startswith('RNN'):
nonlinearity = variantToOnnxActivationMap[variant[4:].lower()]
variant = 'RNN'
w_hh = all_weights[1]
hidden_size = sym_help._get_tensor_dim_size(w_hh, 1)
if hidden_size is None:
return _unimplemented('RNN/GRU/LSTM', 'unknown hidden size')
unidirectional = not bidirectional
prev_output = input
h_outs = []
if variant == 'RNN' or variant == 'GRU':
h0 = initial_states
elif variant == 'LSTM':
h0, c0 = initial_states
c_outs = []
sequence_lens = unused(g) if batch_sizes is None else batch_sizes
if variant == 'GRU':
# pytorch is reset, input, hidden
# onnx is input, reset, hidden
reform_permutation = [(1, 2), (0, 1), (2, 3)]
elif variant == 'LSTM':
# pytorch is input, forget, cell, output.
# onnx is input, output, forget, cell.
reform_permutation = [(0, 1), (3, 4), (1, 3)]
def reform_weights(g, w, n, intervals):
slices = [
sym_help._slice_helper(
g, w, axes=[0], starts=[x * n], ends=[y * n])
for x, y in intervals
]
return g.op('Concat', *slices, axis_i=0)
def transform_weights_no_bias(layer_index):
weights = layer_weights[layer_index]
if variant == 'RNN':
weight_ih, weight_hh = weights
elif variant == 'GRU' or variant == 'LSTM':
weight_ih, weight_hh = [
reform_weights(g, w, hidden_size, reform_permutation)
for w in weights
]
return tuple(
sym_help._unsqueeze_helper(g, x, [0])
for x in (weight_ih, weight_hh))
def transform_weights(layer_index):
weights = layer_weights[layer_index]
if variant == 'RNN':
weight_ih, weight_hh, bias_ih, bias_hh = weights
elif variant == 'GRU' or variant == 'LSTM':
weight_ih, weight_hh, bias_ih, bias_hh = [
reform_weights(g, w, hidden_size, reform_permutation)
for w in weights
]
bias_concat = g.op('Concat', bias_ih, bias_hh, axis_i=0)
return tuple(
sym_help._unsqueeze_helper(g, x, [0])
for x in (weight_ih, weight_hh, bias_concat))
def retrieve_state(x, start, end):
return x if num_layers == 1 else sym_help._slice_helper(
g, x, axes=[0], starts=[start], ends=[end])
for i in range(num_layers):
if unidirectional:
if weights_per_layer == 4:
weight_ih, weight_hh, bias_concat = transform_weights(i)
else:
weight_ih, weight_hh = transform_weights_no_bias(i)
bias_concat = unused(g)
state_indices = i, i + 1
else:
if weights_per_layer == 4:
weight_ih_f, weight_hh_f, bias_f = transform_weights(2 * i)
weight_ih_b, weight_hh_b, bias_b = transform_weights(2 * i + 1)
bias_concat = g.op('Concat', bias_f, bias_b, axis_i=0)
else:
weight_ih_f, weight_hh_f = transform_weights_no_bias(2 * i)
weight_ih_b, weight_hh_b = transform_weights_no_bias(2 * i + 1)
bias_concat = unused(g)
weight_ih = g.op('Concat', weight_ih_f, weight_ih_b, axis_i=0)
weight_hh = g.op('Concat', weight_hh_f, weight_hh_b, axis_i=0)
state_indices = 2 * i, 2 * i + 2
inputs = [
prev_output, weight_ih, weight_hh, bias_concat, sequence_lens
]
inputs.append(retrieve_state(h0, *state_indices))
if variant == 'LSTM':
inputs.append(retrieve_state(c0, *state_indices))
extra_kwargs = {} if unidirectional else {
'direction_s': 'bidirectional'
}
if variant == 'RNN':
if bidirectional:
activation = [nonlinearity, nonlinearity]
else:
activation = [nonlinearity]
prev_output, h_out = g.op(
'RNN',
*inputs,
outputs=2,
hidden_size_i=hidden_size,
activations_s=activation,
**extra_kwargs)
elif variant == 'GRU':
prev_output, h_out = g.op(
'GRU',
*inputs,
outputs=2,
hidden_size_i=hidden_size,
linear_before_reset_i=1,
**extra_kwargs)
elif variant == 'LSTM':
# g.op will add some node to h0 and c0,
# which is not necessary for us
prev_output, h_out, c_out = g.op(
'ncnn::LSTM',
*inputs,
outputs=3,
hidden_size_i=hidden_size,
**extra_kwargs)
if bidirectional:
# The ONNX RNN/GRU/LSTM produce an output of dimensions
# seq_len, num_directions, batch, hidden_size
# We have to convert to match pytorch's expected
# seq_len, batch, num_directions * hidden_size
# by first moving num_directions before hidden_size with
# Transpose, and then combining it with hidden_size
# with Reshape.
prev_output = g.op('Transpose', prev_output, perm_i=[0, 2, 1, 3])
prev_output = g.op(
'Reshape', prev_output,
g.op('Constant', value_t=torch.LongTensor([0, 0, -1])))
else:
prev_output = sym_help._squeeze_helper(g, prev_output, [1])
h_outs.append(h_out)
if variant == 'LSTM':
c_outs.append(c_out)
if batch_first:
# seq, batch, num_directions * hidden_size -> batch, seq,
# num_directions * hidden_size
prev_output = g.op('Transpose', prev_output, perm_i=[1, 0, 2])
h_outs = h_out if num_layers == 1 else g.op('Concat', *h_outs, axis_i=0)
if variant == 'RNN' or variant == 'GRU':
return prev_output, h_outs
elif variant == 'LSTM':
c_outs = c_out if num_layers == 1 else g.op(
'Concat', *c_outs, axis_i=0)
return prev_output, h_outs, c_outs | rewrite of _generic_rnn for ncnn. `g.op` will add some nodes for h0 and c0 in LSTM. which is not supported in ncnn. So we add a custom domain to avoid it. |
188,587 |
The provided code snippet includes necessary dependencies for implementing the `adaptive_avg_pool2d__ncnn` function. Write a Python function `def adaptive_avg_pool2d__ncnn(g, x, output_size)` to solve the following problem:
Register ncnn symbolic function for `adaptive_avg_pool2d`. Align symbolic of adaptive_avg_pool2d in ncnn.
Here is the function:
def adaptive_avg_pool2d__ncnn(g, x, output_size):
"""Register ncnn symbolic function for `adaptive_avg_pool2d`.
Align symbolic of adaptive_avg_pool2d in ncnn.
"""
return g.op('mmdeploy::AdaptiveAvgPool2d', x, output_size) | Register ncnn symbolic function for `adaptive_avg_pool2d`. Align symbolic of adaptive_avg_pool2d in ncnn. |
188,588 | from torch.onnx.symbolic_helper import parse_args
from mmdeploy.core import SYMBOLIC_REWRITER
from mmdeploy.utils import Backend
def linear_no_bias(g, input, weight):
"""Symbolic function for `linear` without bias.
PyTorch `nn.Linear` will be exported as ONNX node 'Gemm'.
"""
return g.op(
'Gemm', input, weight, alpha_f=1.0, beta_f=1.0, transA_i=0, transB_i=1)
def linear_normal(g, input, weight, bias):
"""Symbolic function for `linear`.
PyTorch `nn.Linear` will be exported as ONNX node 'Gemm'.
"""
return g.op(
'Gemm',
input,
weight,
bias,
alpha_f=1.0,
beta_f=1.0,
transA_i=0,
transB_i=1)
@SYMBOLIC_REWRITER.register_symbolic(
    'linear', is_pytorch=True, backend=Backend.NCNN.value)
The provided code snippet includes necessary dependencies for implementing the `linear__ncnn` function. Write a Python function `def linear__ncnn(g, input, weight, bias)` to solve the following problem:
Support exporting linear. This rewrite enables exporting Gemm.
Here is the function:
def linear__ncnn(g, input, weight, bias):
"""Support export linear This rewrite enable export Gemm."""
if bias is None:
return linear_no_bias(g, input, weight)
else:
        return linear_normal(g, input, weight, bias) | Support exporting linear. This rewrite enables exporting Gemm. |
188,589 | import math
from typing import Optional, Tuple
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils.constants import Backend
class ScaledDotProductAttentionTRT(torch.autograd.Function):
"""Caller of scale dot product attention."""
    @staticmethod
    def forward(ctx,
q: Tensor,
k: Tensor,
v: Tensor,
attn_mask: Optional[Tensor] = None):
"""forward function."""
B, Nt, E = q.shape
q = q / math.sqrt(E)
# (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)
attn = torch.bmm(q, k.transpose(-2, -1))
if attn_mask is not None:
attn += attn_mask
attn = attn.softmax(-1)
# (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
output = torch.bmm(attn, v)
return output, attn
    @staticmethod
    def symbolic(g, q, k, v, mask):
"""Symbolic function."""
inputs = [q, k, v]
if mask is not None:
inputs += [mask]
return g.op(
'mmdeploy::ScaledDotProductAttentionTRT', *inputs, outputs=2)
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.nn.functional._scaled_dot_product_attention',
    backend=Backend.TENSORRT.value)
The provided code snippet includes necessary dependencies for implementing the `_scaled_dot_product_attention__tensorrt` function. Write a Python function `def _scaled_dot_product_attention__tensorrt(q: Tensor, k: Tensor, v: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: float = 0.0, **kwargs) -> Tuple[Tensor, Tensor]` to solve the following problem:
Rewrite for custom ops.
Here is the function:
def _scaled_dot_product_attention__tensorrt(q: Tensor,
k: Tensor,
v: Tensor,
attn_mask: Optional[Tensor] = None,
dropout_p: float = 0.0,
**kwargs) -> Tuple[Tensor, Tensor]:
"""Rewrite for custom ops."""
return ScaledDotProductAttentionTRT.apply(q, k, v, attn_mask) | Rewrite for custom ops. |
188,590 | import math
from typing import Optional, Tuple
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils.constants import Backend
The provided code snippet includes necessary dependencies for implementing the `scaled_dot_product_attention__default` function. Write a Python function `def scaled_dot_product_attention__default(query, key, value, attn_mask=None, dropout_p=0., scale=None, is_causal=False)` to solve the following problem:
Rewrite to export to onnx on torch>=2.0.0.
Here is the function:
def scaled_dot_product_attention__default(query,
key,
value,
attn_mask=None,
dropout_p=0.,
scale=None,
is_causal=False):
"""Rewrite to export to onnx on torch>=2.0.0."""
scale = scale or query.size(-1)**0.5
if is_causal and attn_mask is not None:
attn_mask = torch.ones(
query.size(-2), key.size(-2), dtype=torch.bool).tril(diagonal=0)
if attn_mask is not None and attn_mask.dtype == torch.bool:
        # build an additive float mask from the boolean mask
        # (`not attn_mask` on a multi-element tensor would raise at runtime)
        attn_mask = torch.zeros_like(attn_mask, dtype=query.dtype).masked_fill(
            attn_mask.logical_not(), -float('inf'))
attn_weight = query @ key.transpose(-2, -1) / scale
if attn_mask is not None:
attn_weight += attn_mask
attn_weight = torch.softmax(attn_weight, dim=-1)
attn_weight = torch.dropout(attn_weight, dropout_p, True)
return attn_weight @ value | Rewrite to export to onnx on torch>=2.0.0. |
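For the plain case (no mask, no dropout, default scale), the decomposition reduces to softmax(QK^T / sqrt(E)) V; a short sketch (illustrative shapes, torch>=2.0 needed for the reference op) comparing it with torch.nn.functional.scaled_dot_product_attention:

import torch
import torch.nn.functional as F

q, k, v = (torch.randn(2, 4, 6, 8) for _ in range(3))
scale = q.size(-1) ** 0.5
ref = F.scaled_dot_product_attention(q, k, v)  # reference op, torch >= 2.0
attn = torch.softmax(q @ k.transpose(-2, -1) / scale, dim=-1)
assert torch.allclose(attn @ v, ref, atol=1e-6)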
188,591 |
The provided code snippet includes necessary dependencies for implementing the `group_norm__ncnn` function. Write a Python function `def group_norm__ncnn( input: torch.Tensor, num_groups: int, weight: Union[torch.Tensor, torch.NoneType] = None, bias: Union[torch.Tensor, torch.NoneType] = None, eps: float = 1e-05, ) -> torch.Tensor` to solve the following problem:
Rewrite `group_norm` for ncnn backend. InstanceNorm in ncnn requires input with shape [C, H, W], so we have to reshape the input tensor first.
Here is the function:
def group_norm__ncnn(
input: torch.Tensor,
num_groups: int,
weight: Union[torch.Tensor, torch.NoneType] = None,
bias: Union[torch.Tensor, torch.NoneType] = None,
eps: float = 1e-05,
) -> torch.Tensor:
"""Rewrite `group_norm` for ncnn backend.
    InstanceNorm in ncnn requires input with shape [C, H, W], so we have to
    reshape the input tensor first.
"""
input_shape = input.shape
batch_size = input_shape[0]
# We cannot use input.reshape(batch_size, num_groups, -1, 1)
# instead, or we will meet bug on ncnn Reshape ops.
input_reshaped = input.reshape(batch_size, num_groups, -1)
input_reshaped = input_reshaped.unsqueeze(3)
# the weight_'s size is not the same as weight's size
# we only use groupnorm to calculate instancenorm, but the
# input parameters may not be the same, and need to transform.
weight_ = torch.tensor([1.] * num_groups).type_as(input)
bias_ = torch.tensor([0.] * num_groups).type_as(input)
norm_reshaped = torch.nn.functional.instance_norm(
input_reshaped, weight=weight_, bias=bias_, eps=eps)
norm = norm_reshaped.reshape(*input_shape)
if weight is None:
weight = torch.tensor([1.]).type_as(input)
if bias is None:
bias = torch.tensor([0.]).type_as(input)
weight = weight.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
    return norm * weight + bias | Rewrite `group_norm` for ncnn backend. InstanceNorm in ncnn requires input with shape [C, H, W], so we have to reshape the input tensor first. |
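The reshape-to-instance-norm trick can be verified numerically against torch.nn.functional.group_norm; the sketch below uses illustrative shapes and the same reshape steps as the rewrite.

import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 5, 5)
num_groups, eps = 4, 1e-5
weight, bias = torch.randn(8), torch.randn(8)

# reshape to (N, num_groups, -1, 1) and run instance norm per group
reshaped = x.reshape(1, num_groups, -1).unsqueeze(3)
norm = F.instance_norm(reshaped, eps=eps).reshape(x.shape)
out = norm * weight.reshape(1, -1, 1, 1) + bias.reshape(1, -1, 1, 1)
assert torch.allclose(out, F.group_norm(x, num_groups, weight, bias, eps), atol=1e-5)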
188,592 | import torch
import torch.onnx.symbolic_helper as sym_help
from packaging.version import parse as version_parse
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `_prepare_onnx_paddings__tensorrt` function. Write a Python function `def _prepare_onnx_paddings__tensorrt(g, input, pad)` to solve the following problem:
Rewrite `_prepare_onnx_paddings` for TensorRT backend. For codes like `x = torch.nn.ZeroPad2d((0, a, 0, b))(x)`, where a and b are variables of torch.tensor, onnx2tensorrt raises errors like `INVALID_NODE: Invalid Node - Pad_`. Generate paddings in ONNX order based on pad in pytorch. Args: input: the input tensor. pad: the paddings in pytorch. The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ..., dim_m_begin, dim_m_end, where m is in range [0, n].
Here is the function:
def _prepare_onnx_paddings__tensorrt(g, input, pad):
"""Rewrite `_prepare_onnx_paddings` for TensorRT backend.
For codes like `x = torch.nn.ZeroPad2d((0, a, 0, b))(x)`, where a and b are
variables of torch.tensor, onnx2tensorrt raises errors like
`INVALID_NODE: Invalid Node - Pad_`.
Generate paddings in ONNX order based on pad in pytorch.
Args:
input: the input tensor.
pad: the paddings in pytorch.
The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end,
..., dim_m_begin, dim_m_end,
where m is in range [0, n].
"""
ctx = FUNCTION_REWRITER.get_context()
torch_version = version_parse(torch.__version__)
if torch_version.major == 1 and torch_version.minor < 10:
return ctx.origin_func(g, input, pad)
# The desired order of paddings is
# dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.
# n is the dimension of input.
# Assume zero-dimensions in the beginning, pad the "pad" sequence with
# zeros in the beginning
pad_len = torch.onnx.symbolic_opset9.size(
g, pad, g.op('Constant', value_t=torch.tensor([0])))
# Set extension = [0] * (dim * 2 - len(pad))
rank = sym_help._get_tensor_rank(input)
if rank is None:
rank = g.op('Size', g.op('Shape', input))
else:
rank = g.op('Constant', value_t=torch.tensor(rank, dtype=torch.int64))
extension = g.op(
'Sub',
g.op('Mul', rank,
g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))),
pad_len)
# Concat pad with extension: paddings = [dim_n_begin, dim_n_end,
# dim_n-1_begin, dim_n-1_end, 0, 0, ... ]
# Currently ONNX only supports int64 type for Pad
pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])
paddings = g.op(
'Concat',
pad,
g.op(
'ConstantOfShape',
extension,
value_t=torch.tensor([0], dtype=torch.int64)),
axis_i=0)
# Reshape and reverse order and collate first beginnings and then ends
# paddings = [[..., 0, dim_n-1_begin, dim_n_begin],
# [..., 0, dim_n-1_end, dim_n_end]]
# Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin,
# ..., 0, dim_n - 1_end, dim_n_end]
# replace original Constant-Transpose-Constant with Slices and Concat.
paddings = torch.onnx.symbolic_opset10.flip(g, paddings, [0])
begins = sym_help._slice_helper(
g, paddings, axes=[0], starts=[1], ends=[0xffff], steps=[2])
ends = sym_help._slice_helper(
g, paddings, axes=[0], starts=[0], ends=[0xffff], steps=[2])
paddings = g.op('Concat', begins, ends, axis_i=0)
padding_c = g.op(
'Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])
return padding_c | Rewrite `_prepare_onnx_paddings` for TensorRT backend. For codes like `x = torch.nn.ZeroPad2d((0, a, 0, b))(x)`, where a and b are variables of torch.tensor, onnx2tensorrt raises errors like `INVALID_NODE: Invalid Node - Pad_`. Generate paddings in ONNX order based on pad in pytorch. Args: input: the input tensor. pad: the paddings in pytorch. The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ..., dim_m_begin, dim_m_end, where m is in range [0, n]. |
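The reordering performed by the graph is easier to follow in plain Python: PyTorch's pad argument lists (last-dim begin, last-dim end, ...), while ONNX Pad expects all begins in axis order followed by all ends. A sketch of the same extend/flip/deinterleave steps on an illustrative ZeroPad2d case:

def onnx_pads_from_torch(pad, rank):
    # pad follows torch.nn.functional.pad:
    # (dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ...)
    pad = list(pad) + [0] * (2 * rank - len(pad))  # extend with zeros
    pad = pad[::-1]                                # reverse, as the graph does with flip
    begins, ends = pad[1::2], pad[0::2]            # deinterleave begins / ends
    return begins + ends                           # ONNX order: all begins, then all ends

# ZeroPad2d((0, 2, 0, 3)) on an NCHW tensor: pad width end by 2, height end by 3
assert onnx_pads_from_torch((0, 2, 0, 3), rank=4) == [0, 0, 0, 0, 0, 0, 3, 2]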
188,593 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import IR
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.chunk', backend='ncnn')
def chunk__ncnn(self, num_chunks: int, dim: int = 0) -> torch.Tensor:
"""Rewrite `chunk` for NCNN backend.
    Chunk in ncnn is not supported, so it should be rewritten.
"""
dim_len = self.shape[dim]
# int ceil.
step = dim_len // num_chunks
if dim_len % num_chunks > 0:
step += 1
index_list = []
index = 0
while index < dim_len:
index_list.append(index)
index += step
index_list.append(dim_len)
output = [
self.index_select(
dim,
torch.tensor([j for j in range(index_list[i], index_list[i + 1])],
dtype=torch.int64))
for i in range(len(index_list) - 1)
]
return output
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.chunk', ir=IR.TORCHSCRIPT)
def chunk__torchscript(self, num_chunks: int, dim: int = 0) -> torch.Tensor:
"""Rewrite `chunk` for Torchscript.
Replace chunk op with split op
"""
dim_size = self.shape[dim]
assert dim_size % num_chunks == 0, 'cannot split to equal sizes'
output = self.split(dim_size // num_chunks, dim=dim)
return output
The provided code snippet includes necessary dependencies for implementing the `chunk__ncnn` function. Write a Python function `def chunk__ncnn(self, num_chunks: int, dim: int = 0) -> torch.Tensor` to solve the following problem:
Rewrite `chunk` for NCNN backend. Chunk in ncnn is not supported, so it should be rewritten.
Here is the function:
def chunk__ncnn(self, num_chunks: int, dim: int = 0) -> torch.Tensor:
"""Rewrite `chunk` for NCNN backend.
    Chunk in ncnn is not supported, so it should be rewritten.
"""
dim_len = self.shape[dim]
# int ceil.
step = dim_len // num_chunks
if dim_len % num_chunks > 0:
step += 1
index_list = []
index = 0
while index < dim_len:
index_list.append(index)
index += step
index_list.append(dim_len)
output = [
self.index_select(
dim,
torch.tensor([j for j in range(index_list[i], index_list[i + 1])],
dtype=torch.int64))
for i in range(len(index_list) - 1)
]
    return output | Rewrite `chunk` for NCNN backend. Chunk in ncnn is not supported, so it should be rewritten. |
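The index_select-based split matches torch.chunk even when the last chunk is smaller; a short eager-mode sketch with illustrative sizes:

import torch

x = torch.arange(10)
num_chunks, dim = 3, 0
step = -(-x.shape[dim] // num_chunks)  # ceil division, as in the rewrite
starts = list(range(0, x.shape[dim], step)) + [x.shape[dim]]
pieces = [
    x.index_select(dim, torch.arange(starts[i], starts[i + 1]))
    for i in range(len(starts) - 1)
]
assert all(torch.equal(a, b) for a, b in zip(pieces, torch.chunk(x, num_chunks, dim)))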
188,594 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import IR
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.chunk', backend='ncnn')
def chunk__ncnn(self, num_chunks: int, dim: int = 0) -> torch.Tensor:
"""Rewrite `chunk` for NCNN backend.
    Chunk in ncnn is not supported, so it should be rewritten.
"""
dim_len = self.shape[dim]
# int ceil.
step = dim_len // num_chunks
if dim_len % num_chunks > 0:
step += 1
index_list = []
index = 0
while index < dim_len:
index_list.append(index)
index += step
index_list.append(dim_len)
output = [
self.index_select(
dim,
torch.tensor([j for j in range(index_list[i], index_list[i + 1])],
dtype=torch.int64))
for i in range(len(index_list) - 1)
]
return output
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.chunk', ir=IR.TORCHSCRIPT)
def chunk__torchscript(self, num_chunks: int, dim: int = 0) -> torch.Tensor:
"""Rewrite `chunk` for Torchscript.
Replace chunk op with split op
"""
dim_size = self.shape[dim]
assert dim_size % num_chunks == 0, 'cannot split to equal sizes'
output = self.split(dim_size // num_chunks, dim=dim)
return output
The provided code snippet includes necessary dependencies for implementing the `chunk__torchscript` function. Write a Python function `def chunk__torchscript(self, num_chunks: int, dim: int = 0) -> torch.Tensor` to solve the following problem:
Rewrite `chunk` for Torchscript. Replace chunk op with split op
Here is the function:
def chunk__torchscript(self, num_chunks: int, dim: int = 0) -> torch.Tensor:
"""Rewrite `chunk` for Torchscript.
Replace chunk op with split op
"""
dim_size = self.shape[dim]
assert dim_size % num_chunks == 0, 'cannot split to equal sizes'
output = self.split(dim_size // num_chunks, dim=dim)
return output | Rewrite `chunk` for Torchscript. Replace chunk op with split op |
188,595 | import torch
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.size', backend='ncnn')
def tensor__size__ncnn(self, *args):
"""Rewrite `size` for ncnn backend.
    ONNX Shape node is not supported in ncnn. This function returns an integer
    instead of torch.Size to avoid an ONNX Shape node.
"""
ctx = FUNCTION_REWRITER.get_context()
ret = ctx.origin_func(self, *args)
if isinstance(ret, torch.Tensor):
ret = int(ret)
elif isinstance(ret, int):
return (ret)
else:
ret = [int(r) for r in ret]
ret = tuple(ret)
return ret
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.size', backend='ascend')
def tensor__size__ascend(self, *args):
"""Rewrite `size` for ascens backend.
Support negative index.
"""
ctx = FUNCTION_REWRITER.get_context()
if len(args) != 0:
index = args[0]
if index < 0:
index = self.dim() + index
args = (index, )
return ctx.origin_func(self, *args)
The provided code snippet includes necessary dependencies for implementing the `tensor__size__ncnn` function. Write a Python function `def tensor__size__ncnn(self, *args)` to solve the following problem:
Rewrite `size` for ncnn backend. ONNX Shape node is not supported in ncnn. This function returns an integer instead of torch.Size to avoid an ONNX Shape node.
Here is the function:
def tensor__size__ncnn(self, *args):
"""Rewrite `size` for ncnn backend.
    ONNX Shape node is not supported in ncnn. This function returns an integer
    instead of torch.Size to avoid an ONNX Shape node.
"""
ctx = FUNCTION_REWRITER.get_context()
ret = ctx.origin_func(self, *args)
if isinstance(ret, torch.Tensor):
ret = int(ret)
elif isinstance(ret, int):
return (ret)
else:
ret = [int(r) for r in ret]
ret = tuple(ret)
    return ret | Rewrite `size` for ncnn backend. ONNX Shape node is not supported in ncnn. This function returns an integer instead of torch.Size to avoid an ONNX Shape node. |
188,596 |
The provided code snippet includes necessary dependencies for implementing the `tensor__size__ascend` function. Write a Python function `def tensor__size__ascend(self, *args)` to solve the following problem:
Rewrite `size` for ascend backend. Support negative index.
Here is the function:
def tensor__size__ascend(self, *args):
"""Rewrite `size` for ascens backend.
Support negative index.
"""
ctx = FUNCTION_REWRITER.get_context()
if len(args) != 0:
index = args[0]
if index < 0:
index = self.dim() + index
args = (index, )
    return ctx.origin_func(self, *args) | Rewrite `size` for ascend backend. Support negative index. |
188,597 |
The provided code snippet includes necessary dependencies for implementing the `normalize__ncnn` function. Write a Python function `def normalize__ncnn(input: torch.Tensor, p: int = 2, dim: int = 1, eps: float = 1e-12, *args, **kwargs)` to solve the following problem:
Rewrite `normalize` for ncnn backend. Make sure L2 norm on channel dim and be exported to ncnn correctly.
Here is the function:
def normalize__ncnn(input: torch.Tensor,
p: int = 2,
dim: int = 1,
eps: float = 1e-12,
*args,
**kwargs):
"""Rewrite `normalize` for ncnn backend.
Make sure L2 norm on channel dim and be exported to ncnn correctly.
"""
ctx = FUNCTION_REWRITER.get_context()
if dim < 0:
dim += input.ndim
assert dim != 0, 'Should not normalize on batch index'
origin_func = ctx.origin_func
assert p == 2, 'only support L2 norm'
assert input.ndim in [3, 4]
assert input.shape[0] == 1, \
f'only support batch size 1, but given {input.shape[0]}'
if input.ndim == 3:
output = origin_func(
input.transpose(1, dim).unsqueeze(2), p=p, dim=1,
eps=eps).squeeze(2).transpose(1, dim)
else:
# input.ndim == 4:
if dim == 1:
output = origin_func(input, p=p, dim=dim, eps=eps)
else:
output = origin_func(
input.transpose(1, dim), p=p, dim=1,
eps=eps).transpose(1, dim)
return output | Rewrite `normalize` for ncnn backend. Make sure L2 norm on channel dim and be exported to ncnn correctly. |
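The transpose/unsqueeze dance for 3-d inputs can be checked against a plain torch.nn.functional.normalize call; the sketch below assumes batch size 1 and dim=2, mirroring the rewrite's constraints:

import torch
import torch.nn.functional as F

x = torch.randn(1, 5, 7)  # batch size must be 1 for this rewrite
dim = 2
# L2-normalize on the channel dim after moving `dim` to position 1,
# then move it back -- the same steps as the ncnn rewrite for 3-d input
out = F.normalize(x.transpose(1, dim).unsqueeze(2), p=2, dim=1).squeeze(2).transpose(1, dim)
assert torch.allclose(out, F.normalize(x, p=2, dim=dim), atol=1e-6)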
188,598 |
The provided code snippet includes necessary dependencies for implementing the `norm__ncnn` function. Write a Python function `def norm__ncnn(input: torch.Tensor, p: Optional[Union[int, str]] = 'fro', dim: Optional[Union[int, Sequence]] = None, keepdim: Optional[bool] = False, out: Optional[torch.Tensor] = None, dtype: Optional[torch.dtype] = None)` to solve the following problem:
Rewrite `torch.norm` for ncnn backend. Rewrite torch.norm when p is Frobenius norm to avoid FP16 exceed in ncnn Android platform.
Here is the function:
def norm__ncnn(input: torch.Tensor,
p: Optional[Union[int, str]] = 'fro',
dim: Optional[Union[int, Sequence]] = None,
keepdim: Optional[bool] = False,
out: Optional[torch.Tensor] = None,
dtype: Optional[torch.dtype] = None):
"""Rewrite `torch.norm` for ncnn backend.
Rewrite torch.norm when p is Frobenius norm to avoid FP16 exceed in ncnn
Android platform.
"""
ctx = FUNCTION_REWRITER.get_context()
origin_func = ctx.origin_func
if p == 'fro' and (isinstance(dim, int) or len(dim) == 1):
# Substitute Frobenius norm with L2 norm.
return origin_func(
input, p=2, dim=dim, keepdim=keepdim, out=out, dtype=dtype)
else:
return origin_func(
input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype) | Rewrite `torch.norm` for ncnn backend. Rewrite torch.norm when p is Frobenius norm to avoid FP16 exceed in ncnn Android platform. |
188,599 | from typing import Iterable
import torch
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.__getitem__', backend='ascend')
def tensor__getitem__ascend(self, key) -> torch.Tensor:
"""Rewrite `getitem` for ascend backend.
Ascend does not support negative select
"""
ctx = FUNCTION_REWRITER.get_context()
if not isinstance(key, (tuple, list)):
if isinstance(key, int) and key < 0:
key = self.dim() + key
return ctx.origin_func(self, key)
def _num_slice_types(slices):
num_slice = 0
for s in slices:
if isinstance(s, slice) or isinstance(s, int) or isinstance(
s, Iterable):
num_slice += 1
return num_slice
shape = self.shape
new_key = list(key)
num_ellipsis = len(shape) - _num_slice_types(new_key)
dim_count = 0
for i, k in enumerate(new_key):
if isinstance(k, int):
if k < 0:
new_key[i] = shape[dim_count] + k
if k == Ellipsis:
dim_count = dim_count + num_ellipsis
elif k is not None:
dim_count += 1
return ctx.origin_func(self, new_key)
The provided code snippet includes necessary dependencies for implementing the `tensor__getitem__ascend` function. Write a Python function `def tensor__getitem__ascend(self, key) -> torch.Tensor` to solve the following problem:
Rewrite `getitem` for ascend backend. Ascend does not support negative select
Here is the function:
def tensor__getitem__ascend(self, key) -> torch.Tensor:
"""Rewrite `getitem` for ascend backend.
Ascend does not support negative select
"""
ctx = FUNCTION_REWRITER.get_context()
if not isinstance(key, (tuple, list)):
if isinstance(key, int) and key < 0:
key = self.dim() + key
return ctx.origin_func(self, key)
def _num_slice_types(slices):
num_slice = 0
for s in slices:
if isinstance(s, slice) or isinstance(s, int) or isinstance(
s, Iterable):
num_slice += 1
return num_slice
shape = self.shape
new_key = list(key)
num_ellipsis = len(shape) - _num_slice_types(new_key)
dim_count = 0
for i, k in enumerate(new_key):
if isinstance(k, int):
if k < 0:
new_key[i] = shape[dim_count] + k
if k == Ellipsis:
dim_count = dim_count + num_ellipsis
elif k is not None:
dim_count += 1
return ctx.origin_func(self, new_key) | Rewrite `getitem` for ascend backend. Ascend does not support negative select |
188,600 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.clip', backend=Backend.COREML.value)
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.clip', backend=Backend.COREML.value)
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.clamp', backend=Backend.COREML.value)
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.clamp', backend=Backend.COREML.value)
def clip__coreml(input, min=None, max=None, **kwargs) -> torch.Tensor:
"""Rewrite `clip` for coreml backend.
Cast data type.
"""
ctx = FUNCTION_REWRITER.get_context()
if min is not None and not isinstance(min, torch.Tensor):
min = input.new_tensor(min)
if max is not None and not isinstance(max, torch.Tensor):
max = input.new_tensor(max)
return ctx.origin_func(input, min=min, max=max, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `clip__coreml` function. Write a Python function `def clip__coreml(input, min=None, max=None, **kwargs) -> torch.Tensor` to solve the following problem:
Rewrite `clip` for coreml backend. Cast data type.
Here is the function:
def clip__coreml(input, min=None, max=None, **kwargs) -> torch.Tensor:
"""Rewrite `clip` for coreml backend.
Cast data type.
"""
ctx = FUNCTION_REWRITER.get_context()
if min is not None and not isinstance(min, torch.Tensor):
min = input.new_tensor(min)
if max is not None and not isinstance(max, torch.Tensor):
max = input.new_tensor(max)
return ctx.origin_func(input, min=min, max=max, **kwargs) | Rewrite `clip` for coreml backend. Cast data type. |
188,601 | from typing import Optional, Tuple, Union
import torch
from torch.autograd import Function
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend, get_root_logger
ctx = FUNCTION_REWRITER.get_context()
input_size = input.shape
return ctx.origin_func(
input,
None,
scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor)
ctx = FUNCTION_REWRITER.get_context()
input_size = input.shape
return ctx.origin_func(
input,
None,
scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor)
The provided code snippet includes necessary dependencies for implementing the `interpolate__ncnn` function. Write a Python function `def interpolate__ncnn(input: torch.Tensor, size: Optional[Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]]] = None, scale_factor: Optional[Union[float, Tuple[float]]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None)` to solve the following problem:
Rewrite `interpolate` for ncnn backend. ncnn requires `size` to be constant in the ONNX node. We use `scale_factor` instead of `size` to avoid a dynamic size.
Here is the function:
def interpolate__ncnn(input: torch.Tensor,
size: Optional[Union[int, Tuple[int], Tuple[int, int],
Tuple[int, int, int]]] = None,
scale_factor: Optional[Union[float,
Tuple[float]]] = None,
mode: str = 'nearest',
align_corners: Optional[bool] = None,
recompute_scale_factor: Optional[bool] = None):
"""Rewrite `interpolate` for ncnn backend.
    ncnn requires `size` to be constant in the ONNX node. We use
    `scale_factor` instead of `size` to avoid a dynamic size.
"""
ctx = FUNCTION_REWRITER.get_context()
input_size = input.shape
if scale_factor is None:
scale_factor = [
s_out / s_in for s_out, s_in in zip(size, input_size[2:])
]
return ctx.origin_func(
input,
None,
scale_factor,
mode=mode,
align_corners=align_corners,
        recompute_scale_factor=recompute_scale_factor) | Rewrite `interpolate` for ncnn backend. ncnn requires `size` to be constant in the ONNX node. We use `scale_factor` instead of `size` to avoid a dynamic size. |
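Replacing `size` with a scale factor computed from the input shape gives the same result when the ratio is exact; an illustrative eager-mode sketch:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 16, 16)
size = (32, 48)
# same computation as the rewrite: per-spatial-dim output/input ratio
scale_factor = [s_out / s_in for s_out, s_in in zip(size, x.shape[2:])]
a = F.interpolate(x, size=size, mode='nearest')
b = F.interpolate(x, scale_factor=scale_factor, mode='nearest')
assert a.shape == b.shape and torch.allclose(a, b)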
188,602 | from typing import Optional, Tuple, Union
import torch
from torch.autograd import Function
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend, get_root_logger
ctx = FUNCTION_REWRITER.get_context()
input_size = input.shape
return ctx.origin_func(
input,
None,
scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor)
ctx = FUNCTION_REWRITER.get_context()
input_size = input.shape
return ctx.origin_func(
input,
None,
scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor)
The provided code snippet includes necessary dependencies for implementing the `interpolate__rknn` function. Write a Python function `def interpolate__rknn(input: torch.Tensor, size: Optional[Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]]] = None, scale_factor: Optional[Union[float, Tuple[float]]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None)` to solve the following problem:
Rewrite `interpolate` for rknn backend. rknn requires `size` to be constant in the ONNX node. We use `scale_factor` instead of `size` to avoid a dynamic size.
Here is the function:
def interpolate__rknn(input: torch.Tensor,
size: Optional[Union[int, Tuple[int], Tuple[int, int],
Tuple[int, int, int]]] = None,
scale_factor: Optional[Union[float,
Tuple[float]]] = None,
mode: str = 'nearest',
align_corners: Optional[bool] = None,
recompute_scale_factor: Optional[bool] = None):
"""Rewrite `interpolate` for rknn backend.
    rknn requires `size` to be constant in the ONNX node. We use
    `scale_factor` instead of `size` to avoid a dynamic size.
"""
ctx = FUNCTION_REWRITER.get_context()
input_size = input.shape
if scale_factor is None:
scale_factor = [(s_out / s_in)
for s_out, s_in in zip(size, input_size[2:])]
if isinstance(scale_factor[0], torch.Tensor):
scale_factor = [i.item() for i in scale_factor]
return ctx.origin_func(
input,
None,
scale_factor,
mode=mode,
align_corners=align_corners,
        recompute_scale_factor=recompute_scale_factor) | Rewrite `interpolate` for rknn backend. rknn requires `size` to be constant in the ONNX node. We use `scale_factor` instead of `size` to avoid a dynamic size. |
188,603 | from typing import Optional, Tuple, Union
import torch
from torch.autograd import Function
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend, get_root_logger
ctx = FUNCTION_REWRITER.get_context()
input_size = input.shape
return ctx.origin_func(
input,
None,
scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor)
ctx = FUNCTION_REWRITER.get_context()
input_size = input.shape
return ctx.origin_func(
input,
None,
scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor)
The provided code snippet includes necessary dependencies for implementing the `interpolate__tensorrt` function. Write a Python function `def interpolate__tensorrt( input: torch.Tensor, size: Optional[Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]]] = None, scale_factor: Optional[Union[float, Tuple[float]]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, )` to solve the following problem:
Register default symbolic function for `interpolate`.
Here is the function:
def interpolate__tensorrt(
input: torch.Tensor,
size: Optional[Union[int, Tuple[int], Tuple[int, int], Tuple[int, int,
int]]] = None,
scale_factor: Optional[Union[float, Tuple[float]]] = None,
mode: str = 'nearest',
align_corners: Optional[bool] = None,
recompute_scale_factor: Optional[bool] = None,
):
"""Register default symbolic function for `interpolate`."""
ctx = FUNCTION_REWRITER.get_context()
class BicubicInterpolate(Function):
def __init__(self) -> None:
super().__init__()
@staticmethod
def symbolic(g, input, scale_factor, align_corners):
"""Symbolic function for creating onnx op."""
return g.op(
'mmdeploy::TRTBicubicInterpolate',
input,
scale_factor_f=scale_factor,
align_corners_i=align_corners)
@staticmethod
def forward(g, input, scale_factor, align_corners):
"""Run forward."""
return ctx.origin_func(
input,
scale_factor=scale_factor,
mode='bicubic',
align_corners=align_corners)
if 'bicubic' == mode:
input_size = input.shape
if isinstance(scale_factor, float):
scale_factor = [scale_factor, scale_factor]
if scale_factor is None:
logger = get_root_logger()
logger.warning(
'ResizeLayer in TensorRT allow dynamic input shape with shape '
'tensor. Which is not available for custom ops. Computed scale'
'_factor might be the right way to get final shape.')
scale_factor = [
float(s_out / s_in)
for s_out, s_in in zip(size, input_size[2:])
]
return BicubicInterpolate.apply(input, scale_factor, align_corners)
else:
return ctx.origin_func(
input,
size=size,
scale_factor=scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor) | Register default symbolic function for `interpolate`. |
188,604 |
The provided code snippet includes necessary dependencies for implementing the `mod__tensorrt` function. Write a Python function `def mod__tensorrt(input: torch.Tensor, other: Union[torch.Tensor, torch.NumberType], *args, **kwargs) -> torch.Tensor` to solve the following problem:
Rewrite `mod` when exporting model to ONNX for TensorRT backend.
Here is the function:
def mod__tensorrt(input: torch.Tensor, other: Union[torch.Tensor,
torch.NumberType], *args,
**kwargs) -> torch.Tensor:
"""Rewrite `mod` when exporting model to ONNX for TensorRT backend."""
ctx = FUNCTION_REWRITER.get_context()
if version.parse(torch.__version__) > version.parse('1.10.0'):
return input - (input // other) * other
return ctx.origin_func(input, other, *args, **kwargs) | Rewrite `mod` when exporting model to ONNX for TensorRT backend. |
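The floor-division decomposition reproduces torch's `%` (remainder) semantics, including the sign-of-divisor behaviour; a small sketch with illustrative values (note that `//` on tensors is floor division in recent torch releases):

import torch

a = torch.tensor([5., -5., 7., -7.])
b = torch.tensor([3., 3., -3., -3.])
decomposed = a - (a // b) * b      # the expression used in the rewrite
assert torch.allclose(decomposed, a % b)  # torch `%` is remainder (sign of divisor)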
188,605 | from typing import Sequence
import torch
from packaging.version import parse
from mmdeploy.core import FUNCTION_REWRITER, SYMBOLIC_REWRITER
if parse(torch.__version__) >= parse('1.12.0'):
The provided code snippet includes necessary dependencies for implementing the `tensor__setitem__default` function. Write a Python function `def tensor__setitem__default(self, key, value)` to solve the following problem:
Rewrite `setitem` to ease the index put.
Here is the function:
def tensor__setitem__default(self, key, value):
"""Rewrite `setitem` to ease the index put."""
ctx = FUNCTION_REWRITER.get_context()
# only support torch>=1.9.0
if parse(torch.__version__) < parse('1.9.0'):
return ctx.origin_func(self, key, value)
if isinstance(key, slice):
key = (key, )
if not isinstance(key, Sequence):
return ctx.origin_func(self, key, value)
for k in key:
if not isinstance(k, slice) or k.step is not None:
return ctx.origin_func(self, key, value)
out = value
# value could be scalar or single value Tensor
self_shape = self.shape
out_shape = list(self_shape)
for i, k in enumerate(key):
start = 0 if k.start is None else k.start
start = start if start >= 0 else self_shape[i] + start
stop = self_shape[i] if k.stop is None else k.stop
stop = stop if stop >= 0 else self_shape[i] + stop
out_shape[i] = stop - start
if not isinstance(out, torch.Tensor):
out = self.new_full(out_shape, out)
elif out.numel() == 1:
out = out.expand(out_shape)
for i, k in enumerate(key):
if k == slice(None):
continue
cat_list = []
# slice self start
if k.start is not None:
self_slice_start = (slice(None), ) * i + (slice(
0, k.start), ) + key[i + 1:]
self_start = self[self_slice_start]
cat_list.append(self_start)
# add value
cat_list.append(out)
# slice self end
if k.stop is not None:
self_slice_end = (slice(None), ) * i + (slice(
k.stop, None), ) + key[i + 1:]
self_end = self[self_slice_end]
cat_list.append(self_end)
# concate
out = torch.cat(cat_list, dim=i)
# self assign
# Note that set item does not return any value
self[...] = out | Rewrite `setitem` to ease the index put. |
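For intuition, here is a minimal, hand-written instance of the concat trick the rewrite builds automatically; the tensors and the slice `[:, 1:3]` are made up for this example:
import torch

x = torch.arange(12.).reshape(3, 4)
value = torch.zeros(3, 2)
# equivalent of `x[:, 1:3] = value`, expressed as slicing + concatenation
out = torch.cat([x[:, :1], value, x[:, 3:]], dim=1)
x[...] = out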
188,606 | from typing import Sequence
import torch
from packaging.version import parse
from mmdeploy.core import FUNCTION_REWRITER, SYMBOLIC_REWRITER
def copy__default(g, x, y, non_blocking):
return x | null |
188,607 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.flatten', backend=Backend.NCNN.value)
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.flatten', backend=Backend.NCNN.value)
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.flatten', backend=Backend.COREML.value)
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.flatten', backend=Backend.COREML.value)
def flatten__coreml(input, start_dim=0, end_dim=-1) -> torch.Tensor:
"""Rewrite `flatten` for coreml backend.
Use reshape instead of flatten
"""
shape = input.shape
end_dim = end_dim if end_dim > 0 else len(shape) + end_dim
shape1 = list(shape[:start_dim])
shape3 = list(shape[end_dim + 1:])
return input.reshape(shape1 + [-1] + shape3)
The provided code snippet includes necessary dependencies for implementing the `flatten__coreml` function. Write a Python function `def flatten__coreml(input, start_dim=0, end_dim=-1) -> torch.Tensor` to solve the following problem:
Rewrite `flatten` for coreml backend. Use reshape instead of flatten
Here is the function:
def flatten__coreml(input, start_dim=0, end_dim=-1) -> torch.Tensor:
"""Rewrite `flatten` for coreml backend.
Use reshape instead of flatten
"""
shape = input.shape
end_dim = end_dim if end_dim > 0 else len(shape) + end_dim
shape1 = list(shape[:start_dim])
shape3 = list(shape[end_dim + 1:])
return input.reshape(shape1 + [-1] + shape3) | Rewrite `flatten` for coreml backend. Use reshape instead of flatten |
188,608 | from typing import Optional
import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `any__default` function. Write a Python function `def any__default(input: torch.Tensor, dim: Optional[str] = None, keepdim: bool = False, **kwargs) -> torch.Tensor` to solve the following problem:
Rewrite `any` for ONNX.
Here is the function:
def any__default(input: torch.Tensor,
dim: Optional[str] = None,
keepdim: bool = False,
**kwargs) -> torch.Tensor:
"""Rewrite `any` for ONNX."""
if dim is None and keepdim is False:
return (input != 0).sum() > 0
return (input != 0).sum(dim, keepdim=keepdim) > 0 | Rewrite `any` for ONNX. |
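A small, toy check (values chosen only for this example) that the ONNX-friendly replacement agrees with torch.Tensor.any:
import torch

x = torch.tensor([[0, 0, 3], [0, 0, 0]])
assert bool((x != 0).sum() > 0) == bool(x.any())
assert torch.equal((x != 0).sum(1, keepdim=False) > 0, x.any(1))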
188,609 |
The provided code snippet includes necessary dependencies for implementing the `topk__dynamic` function. Write a Python function `def topk__dynamic(input: torch.Tensor, k: int, dim: Optional[int] = None, largest: bool = True, sorted: bool = True)` to solve the following problem:
Rewrite `topk` for default backend. Cast k to tensor and make sure k is smaller than input.shape[dim].
Here is the function:
def topk__dynamic(input: torch.Tensor,
k: int,
dim: Optional[int] = None,
largest: bool = True,
sorted: bool = True):
"""Rewrite `topk` for default backend.
Cast k to tensor and make sure k is smaller than input.shape[dim].
"""
ctx = FUNCTION_REWRITER.get_context()
if dim is None:
dim = int(input.ndim - 1)
size = input.shape[dim]
if not isinstance(k, torch.Tensor):
k = torch.tensor(k, device=input.device, dtype=torch.long)
# Always keep topk op for dynamic input
if isinstance(size, torch.Tensor):
# size would be treated as cpu tensor, trick to avoid that.
size = k.new_zeros(()) + size
k = torch.where(k < size, k, size)
return ctx.origin_func(input, k, dim=dim, largest=largest, sorted=sorted) | Rewrite `topk` for default backend. Cast k to tensor and make sure k is smaller than input.shape[dim]. |
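The clamp-to-size behaviour can be sketched outside of tracing like this (toy shapes, and `size` kept as a plain int for brevity):
import torch

scores = torch.rand(1, 100)
k = torch.tensor(1000, dtype=torch.long)      # requested k exceeds dim size
size = scores.shape[-1]
k = torch.where(k < size, k, torch.tensor(size))
values, inds = scores.topk(int(k), dim=-1)    # runs with k clamped to 100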
188,610 |
TENSORRT_MAX_TOPK = 3840
The provided code snippet includes necessary dependencies for implementing the `topk__tensorrt` function. Write a Python function `def topk__tensorrt(input: torch.Tensor, k: int, dim: Optional[int] = None, largest: bool = True, sorted: bool = True)` to solve the following problem:
Rewrite `topk` for TensorRT backend. TensorRT does not support topk with dynamic k. This function cast k to constant integer.
Here is the function:
def topk__tensorrt(input: torch.Tensor,
k: int,
dim: Optional[int] = None,
largest: bool = True,
sorted: bool = True):
"""Rewrite `topk` for TensorRT backend.
TensorRT does not support topk with dynamic k. This function cast k to
constant integer.
"""
ctx = FUNCTION_REWRITER.get_context()
# https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#topKsetup
from mmdeploy.utils.constants import TENSORRT_MAX_TOPK
if dim is None:
dim = int(input.ndim - 1)
size = input.shape[dim]
if k > size:
k = size
if not isinstance(k, int):
k = int(k)
if k > TENSORRT_MAX_TOPK:
logger = get_root_logger()
logger.warning(
f'Maximum K of TopK in TensorRT is {TENSORRT_MAX_TOPK}, '
f'but given {k}. Note that k will be set '
f'to {TENSORRT_MAX_TOPK}.')
k = TENSORRT_MAX_TOPK
return ctx.origin_func(input, k, dim=dim, largest=largest, sorted=sorted) | Rewrite `topk` for TensorRT backend. TensorRT does not support topk with dynamic k. This function cast k to constant integer. |
188,611 |
The provided code snippet includes necessary dependencies for implementing the `topk__coreml` function. Write a Python function `def topk__coreml(input: torch.Tensor, k: int, dim: Optional[int] = None, largest: bool = True, sorted: bool = True)` to solve the following problem:
Rewrite `topk` for coreml. Cast k to tensor and make sure k is smaller than input.shape[dim].
Here is the function:
def topk__coreml(input: torch.Tensor,
k: int,
dim: Optional[int] = None,
largest: bool = True,
sorted: bool = True):
"""Rewrite `topk` for coreml.
Cast k to tensor and make sure k is smaller than input.shape[dim].
"""
ctx = FUNCTION_REWRITER.get_context()
if dim is None:
dim = int(input.ndim - 1)
size = input.shape[dim]
if not isinstance(k, torch.Tensor):
k = torch.tensor(k, device=input.device, dtype=torch.long)
# Always keep topk op for dynamic input
k = torch.where(k < size, k, size)
return ctx.origin_func(input, k, dim=dim, largest=largest, sorted=sorted) | Rewrite `topk` for coreml. Cast k to tensor and make sure k is smaller than input.shape[dim]. |
188,612 | from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `copy__default` function. Write a Python function `def copy__default(tensor: Tensor, *args, **kwargs) -> Tensor` to solve the following problem:
Rewrite `copy.deepcopy` for default backend. Replace it with tensor.clone(), or may raise `NYI: Named tensors are not supported with the tracer`
Here is the function:
def copy__default(tensor: Tensor, *args, **kwargs) -> Tensor:
"""Rewrite `copy.deepcopy` for default backend.
Replace it with tensor.clone(), or may raise `NYI: Named tensors are not
supported with the tracer`
"""
ctx = FUNCTION_REWRITER.get_context()
if isinstance(tensor, Tensor) and args == () and kwargs == {}:
return tensor.clone()
return ctx.origin_func(tensor, *args, **kwargs) | Rewrite `copy.deepcopy` for default backend. Replace it with tensor.clone(), or may raise `NYI: Named tensors are not supported with the tracer` |
188,613 |
The provided code snippet includes necessary dependencies for implementing the `masked_fill__onnxruntime` function. Write a Python function `def masked_fill__onnxruntime( input, mask: torch.Tensor, value: Union[torch.Tensor, Number]) -> torch.Tensor` to solve the following problem:
Rewrite `masked_fill` for onnxruntime backend. Taking the SATRN model as an example, when value is set to `float('-inf')`, the results of ORT inference turn out to be NaN.
Here is the function:
def masked_fill__onnxruntime(
input, mask: torch.Tensor, value: Union[torch.Tensor,
Number]) -> torch.Tensor:
"""Rewrite `masked_fill` for onnxruntime backend.
    Taking the SATRN model as an example, when value is set to
    `float('-inf')`, the results of ORT inference turn out to be NaN.
"""
ctx = FUNCTION_REWRITER.get_context()
if value == float('-inf'):
value = -1e34 # hard coding number
return ctx.origin_func(input, mask, value) | Rewrite `masked_fill` for onnxruntime backend. SATRN model as example, when value is set to `float('-inf')`, the results of ORT inferencing turns out to be NAN. |
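A hedged illustration (toy tensors chosen only for this example) of why the large negative constant is a safe stand-in for -inf in a softmax-style use:
import torch

attn = torch.zeros(1, 4)
mask = torch.tensor([[True, False, False, True]])
filled = attn.masked_fill(mask, -1e34)   # value used by the rewrite
probs = filled.softmax(-1)               # masked entries get ~0, no NaNs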
188,614 |
The provided code snippet includes necessary dependencies for implementing the `tensor__repeat__tensorrt` function. Write a Python function `def tensor__repeat__tensorrt(input: torch.Tensor, *size: Union[torch.Size, Sequence[int]])` to solve the following problem:
Rewrite `repeat` for TensorRT backend. Some layers in TensorRT cannot be applied on the batch axis. Add an extra axis before the operation and remove it afterward.
Here is the function:
def tensor__repeat__tensorrt(input: torch.Tensor, *size: Union[torch.Size,
Sequence[int]]):
"""Rewrite `repeat` for TensorRT backend.
    Some layers in TensorRT cannot be applied on the batch axis. Add an extra
    axis before the operation and remove it afterward.
"""
ctx = FUNCTION_REWRITER.get_context()
origin_func = ctx.origin_func
if input.dim() == 1 and len(size) == 1:
if isinstance(*size, tuple):
return origin_func(input.unsqueeze(0),
*([1] + list(*size))).squeeze(0)
return origin_func(input.unsqueeze(0), *([1] + list(size))).squeeze(0)
else:
return origin_func(input, *size) | Rewrite `repeat` for TensorRT backend. Some layers in TensorRT can not be applied on batch axis. add extra axis before operation and remove it afterward. |
188,615 | import torch
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.expand', backend='ncnn')
def expand__ncnn(self, *sizes) -> torch.Tensor:
"""Rewrite `expand` for NCNN backend.
Do not expand on batch dim for tensor with ndim >= 3
"""
ctx = FUNCTION_REWRITER.get_context()
if self.ndim < 3 or sizes[0] not in [1, -1]:
return ctx.origin_func(*sizes)
return self
The provided code snippet includes necessary dependencies for implementing the `expand__ncnn` function. Write a Python function `def expand__ncnn(self, *sizes) -> torch.Tensor` to solve the following problem:
Rewrite `expand` for NCNN backend. Do not expand on batch dim for tensor with ndim >= 3
Here is the function:
def expand__ncnn(self, *sizes) -> torch.Tensor:
"""Rewrite `expand` for NCNN backend.
Do not expand on batch dim for tensor with ndim >= 3
"""
ctx = FUNCTION_REWRITER.get_context()
if self.ndim < 3 or sizes[0] not in [1, -1]:
return ctx.origin_func(*sizes)
return self | Rewrite `expand` for NCNN backend. Do not expand on batch dim for tensor with ndim >= 3 |
188,616 | import torch
from torch.types import Number
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `linspace__default` function. Write a Python function `def linspace__default(start: Number, end: Number, steps: int = None, **kwargs)` to solve the following problem:
Rewrite `linspace` for onnxruntime.
Here is the function:
def linspace__default(start: Number, end: Number, steps: int = None, **kwargs):
"""Rewrite `linspace` for onnxruntime."""
steps = 100 if steps is None else steps
dtype = kwargs.pop('dtype', torch.float32)
dtype = dtype if dtype else torch.float32
if steps == 1:
output = torch.arange(start, end + 1, dtype=dtype, **kwargs)[:steps]
else:
output = torch.arange(
start, end + 1, (end - start) / (steps - 1), dtype=dtype,
**kwargs)[:steps]
return output | Rewrite `linspace` for onnxruntime. |
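A quick check with toy values that the arange-based decomposition reproduces torch.linspace:
import torch

start, end, steps = 0.0, 1.0, 5
out = torch.arange(start, end + 1, (end - start) / (steps - 1),
                   dtype=torch.float32)[:steps]
assert torch.allclose(out, torch.linspace(start, end, steps))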
188,617 | from typing import Sequence
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_dynamic_axes
The provided code snippet includes necessary dependencies for implementing the `cat__tensorrt` function. Write a Python function `def cat__tensorrt(tensors: Sequence[Tensor], *args, **kwargs) -> torch.Tensor` to solve the following problem:
Rewrite `cat` for TensorRT backend. cat in TensorRT does not support bool or uint8 type when input is dynamic.
Here is the function:
def cat__tensorrt(tensors: Sequence[Tensor], *args, **kwargs) -> torch.Tensor:
"""Rewrite `cat` for TensorRT backend.
cat in TensorRT does not support bool or uint8 type when input is dynamic.
"""
ctx = FUNCTION_REWRITER.get_context()
if get_dynamic_axes(ctx.cfg) is None:
return ctx.origin_func(tensors, *args, **kwargs)
if len(tensors) > 0 and (tensors[0].dtype in [torch.bool, torch.uint8]):
original_dtype = tensors[0].dtype
tensors = [i.to(torch.int32) for i in tensors]
return ctx.origin_func(tensors, *args, **kwargs).to(original_dtype)
return ctx.origin_func(tensors, *args, **kwargs) | Rewrite `cat` for TensorRT backend. cat in TensorRT does not support bool or uint8 type when input is dynamic. |
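The cast-concat-cast pattern the rewrite applies for dynamic TensorRT inputs can be reproduced by hand (the example tensors are made up):
import torch

masks = [torch.ones(2, 3, dtype=torch.bool), torch.zeros(2, 3, dtype=torch.bool)]
out = torch.cat([m.to(torch.int32) for m in masks], dim=0).to(masks[0].dtype)
assert out.dtype == torch.bool and out.shape == (4, 3)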
188,618 | import torch
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.Tensor.__getattribute__', backend='ncnn')
def tensor__getattribute__ncnn(self: torch.Tensor, name: str):
"""Rewrite `__getattribute__` of `torch.Tensor` for ncnn backend.
    Shape node is not supported by ncnn. This function transforms dynamic
    shape to constant shape.
"""
ctx = FUNCTION_REWRITER.get_context()
ret = ctx.origin_func(self, name)
if name == 'shape':
ret = torch.Size([int(s) for s in ret])
return ret
The provided code snippet includes necessary dependencies for implementing the `tensor__getattribute__ncnn` function. Write a Python function `def tensor__getattribute__ncnn(self: torch.Tensor, name: str)` to solve the following problem:
Rewrite `__getattribute__` of `torch.Tensor` for ncnn backend. Shape node is not supported by ncnn. This function transform dynamic shape to constant shape.
Here is the function:
def tensor__getattribute__ncnn(self: torch.Tensor, name: str):
"""Rewrite `__getattribute__` of `torch.Tensor` for ncnn backend.
    Shape node is not supported by ncnn. This function transforms dynamic
    shape to constant shape.
"""
ctx = FUNCTION_REWRITER.get_context()
ret = ctx.origin_func(self, name)
if name == 'shape':
ret = torch.Size([int(s) for s in ret])
return ret | Rewrite `__getattribute__` of `torch.Tensor` for ncnn backend. Shape node is not supported by ncnn. This function transform dynamic shape to constant shape. |
188,619 | import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend, get_root_logger, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `adaptive_avg_pool2d__default` function. Write a Python function `def adaptive_avg_pool2d__default(input, output_size)` to solve the following problem:
Rewrite `adaptive_avg_pool2d` for default backend.
Here is the function:
def adaptive_avg_pool2d__default(input, output_size):
"""Rewrite `adaptive_avg_pool2d` for default backend."""
ctx = FUNCTION_REWRITER.get_context()
output_size = _pair(output_size)
if int(output_size[0]) == int(output_size[1]) == 1:
out = ctx.origin_func(input, output_size)
else:
deploy_cfg = ctx.cfg
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
if is_dynamic_flag:
logger = get_root_logger()
logger.warning('`adaptive_avg_pool2d` would be '
'replaced to `avg_pool2d` explicitly')
size = input.shape[2:]
k = [int(size[i] / output_size[i]) for i in range(0, len(size))]
out = F.avg_pool2d(
input,
kernel_size=k,
stride=k,
padding=0,
ceil_mode=False,
count_include_pad=False)
return out | Rewrite `adaptive_avg_pool2d` for default backend. |
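A minimal sketch, valid only when the spatial size divides the target size evenly, showing that the fixed-kernel avg_pool2d substitution matches adaptive_avg_pool2d (the shapes here are assumptions for the example):
import torch
import torch.nn.functional as F

x = torch.rand(1, 8, 32, 32)
output_size = (4, 4)
k = [x.shape[2] // output_size[0], x.shape[3] // output_size[1]]  # -> [8, 8]
fixed = F.avg_pool2d(x, kernel_size=k, stride=k, padding=0,
                     ceil_mode=False, count_include_pad=False)
assert torch.allclose(fixed, F.adaptive_avg_pool2d(x, output_size))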
188,620 | import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend, get_root_logger, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `adaptive_avg_pool2d__ncnn` function. Write a Python function `def adaptive_avg_pool2d__ncnn(input, output_size)` to solve the following problem:
Rewrite `adaptive_avg_pool2d` for ncnn and torchscript backend.
Here is the function:
def adaptive_avg_pool2d__ncnn(input, output_size):
    """Rewrite `adaptive_avg_pool2d` for ncnn and torchscript backend."""
    ctx = FUNCTION_REWRITER.get_context()
return ctx.origin_func(input, output_size) | Rewrite `adaptive_avg_pool2d` for ncnn and torchscript backend. |
188,621 | import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `atan2__default` function. Write a Python function `def atan2__default( input1: torch.Tensor, input2: torch.Tensor, )` to solve the following problem:
Rewrite `atan2` for default backend.
Here is the function:
def atan2__default(
input1: torch.Tensor,
input2: torch.Tensor,
):
"""Rewrite `atan2` for default backend."""
return torch.atan(input1 / (input2 + 1e-6)) | Rewrite `atan2` for default backend. |
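Note that the rewrite is an approximation that only matches torch.atan2 in the half-plane where the denominator is positive; a small check under that assumption (toy values):
import torch

y = torch.tensor([1.0, -1.0])
x = torch.tensor([2.0, 3.0])          # positive denominators only
approx = torch.atan(y / (x + 1e-6))   # what the rewrite computes
assert torch.allclose(approx, torch.atan2(y, x), atol=1e-5)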
188,622 | import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `triu__default` function. Write a Python function `def triu__default(input: torch.Tensor, diagonal: int = 0, *args, **kwargs) -> torch.Tensor` to solve the following problem:
Rewrite `triu` for exporting model to ONNX.
Here is the function:
def triu__default(input: torch.Tensor,
diagonal: int = 0,
*args,
**kwargs) -> torch.Tensor:
"""Rewrite `triu` for exporting model to ONNX."""
assert len(input.shape) >= 2
height, width = input.shape[-2:]
arange0 = torch.arange(width, device=input.device).unsqueeze(0)
arange1 = torch.arange(height, device=input.device).unsqueeze(-1)
mask = arange0 >= torch.add(arange1, diagonal)
return input * mask | Rewrite `triu` for exporting model to ONNX. |
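A tiny worked example of the broadcasted index mask (3x3, diagonal=0) that the ONNX-friendly triu builds:
import torch

x = torch.ones(3, 3)
arange0 = torch.arange(3).unsqueeze(0)    # column indices, shape (1, 3)
arange1 = torch.arange(3).unsqueeze(-1)   # row indices,    shape (3, 1)
mask = arange0 >= (arange1 + 0)           # True on and above the diagonal
assert torch.equal(x * mask, torch.triu(x, 0))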
188,623 | from typing import Optional, Union
import torch
from mmdeploy.core import FUNCTION_REWRITER
class GemmOp(torch.autograd.Function):
"""Create onnx::Gemm op."""
    @staticmethod
    def forward(ctx, input, weight, bias=None):
out = input @ weight.transpose(0, 1)
if bias is not None:
out += bias
return out
    @staticmethod
    def symbolic(g, input, weight, bias=None):
input.setDebugName('A')
weight.setDebugName('B')
args = ['Gemm', input, weight]
if bias is not None:
bias.setDebugName('C')
args.append(bias)
return g.op(*args, alpha_f=1.0, beta_f=1.0, transA_i=0, transB_i=1)
@FUNCTION_REWRITER.register_rewriter(
    func_name='torch.nn.functional.linear', backend='ncnn')
The provided code snippet includes necessary dependencies for implementing the `linear__ncnn` function. Write a Python function `def linear__ncnn( input: torch.Tensor, weight: torch.Tensor, bias: Optional[Union[torch.Tensor, torch.NoneType]] = None, )` to solve the following problem:
Rewrite `linear` for ncnn backend. The broadcast rules are different between ncnn and PyTorch. This function add extra reshape and transpose to support linear operation of different input shape.
Here is the function:
def linear__ncnn(
input: torch.Tensor,
weight: torch.Tensor,
bias: Optional[Union[torch.Tensor, torch.NoneType]] = None,
):
"""Rewrite `linear` for ncnn backend.
The broadcast rules are different between ncnn and PyTorch. This function
add extra reshape and transpose to support linear operation of different
input shape.
"""
ctx = FUNCTION_REWRITER.get_context()
origin_func = ctx.origin_func
dim = input.dim()
if dim == 2 or dim == 3 and input.shape[0] == 1:
# export nn.linear to Gemm op in onnx
return GemmOp.apply(input, weight, bias)
else:
out = origin_func(input, weight)
# permute
out = out.transpose(1, dim - 1)
# ncnn only support [c, h, w] and [c, 1, 1] broadcast
out_shape = out.shape
batch_size = out_shape[0]
broad_cast_size = out_shape[1]
out = out.reshape([batch_size, broad_cast_size, -1, 1])
# add bias
if bias is not None:
bias = bias.view([1, -1, 1, 1])
out = out + bias
# permute back
# the last dim should be -1 to support dynamic shape
out = out.reshape(out_shape[:-1] + (-1, ))
out = out.transpose(1, dim - 1)
return out | Rewrite `linear` for ncnn backend. The broadcast rules are different between ncnn and PyTorch. This function add extra reshape and transpose to support linear operation of different input shape. |
188,624 | import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend
class MultiHeadAttentionop(torch.autograd.Function):
"""Create onnx::MultiHeadAttention op."""
    @staticmethod
    def forward(ctx, q: Tensor, k: Tensor, v: Tensor, q_weight: Tensor,
q_bias: Tensor, k_weight: Tensor, k_bias: Tensor,
v_weight: Tensor, v_bias: Tensor, o_weight: Tensor,
o_bias: Tensor, embed_dims: int, num_heads: int) -> Tensor:
return torch.rand_like(q)
    @staticmethod
    def symbolic(g, q: torch._C.Value, k: torch._C.Value, v: torch._C.Value,
q_weight: torch._C.Value, q_bias: torch._C.Value,
k_weight: torch._C.Value, k_bias: torch._C.Value,
v_weight: torch._C.Value, v_bias: torch._C.Value,
o_weight: torch._C.Value, o_bias: torch._C.Value,
embed_dims: int, num_heads: int):
q_weight.setDebugName('q_weight')
q_bias.setDebugName('q_bias')
k_weight.setDebugName('k_weight')
k_bias.setDebugName('k_bias')
v_weight.setDebugName('v_weight')
v_bias.setDebugName('v_bias')
o_weight.setDebugName('o_weight')
o_bias.setDebugName('o_bias')
return g.op(
'mmdeploy::MultiHeadAttention',
q,
k,
v,
q_weight,
q_bias,
k_weight,
k_bias,
v_weight,
v_bias,
o_weight,
o_bias,
embed_dim_i=embed_dims,
num_heads_i=num_heads)
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmcv.cnn.bricks.transformer.MultiheadAttention.forward',
    backend=Backend.NCNN.value)
The provided code snippet includes necessary dependencies for implementing the `multiheadattention__forward__ncnn` function. Write a Python function `def multiheadattention__forward__ncnn(self, query, key=None, value=None, identity=None, query_pos=None, key_pos=None, attn_mask=None, key_padding_mask=None, **kwargs)` to solve the following problem:
Rewrite `forward` of MultiheadAttention used in vision_transformer for ncnn backend. Args: query (Tensor): The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, else [bs, num_queries embed_dims]. key (Tensor): The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, else [bs, num_keys, embed_dims] . If None, the ``query`` will be used. Defaults to None. value (Tensor): The value tensor with same shape as `key`. Same in `nn.MultiheadAttention.forward`. Defaults to None. If None, the `key` will be used. identity (Tensor): This tensor, with the same shape as x, will be used for the identity link. If None, `x` will be used. Defaults to None. query_pos (Tensor): The positional encoding for query, with the same shape as `x`. If not None, it will be added to `x` before forward function. Defaults to None. key_pos (Tensor): The positional encoding for `key`, with the same shape as `key`. Defaults to None. If not None, it will be added to `key` before forward function. If None, and `query_pos` has the same shape as `key`, then `query_pos` will be used for `key_pos`. Defaults to None. attn_mask (Tensor): ByteTensor mask with shape [num_queries, num_keys]. Same in `nn.MultiheadAttention.forward`. Defaults to None. key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. Defaults to None. Returns: Tensor: forwarded results with shape [bs, num_queries embed_dims].
Here is the function:
def multiheadattention__forward__ncnn(self,
query,
key=None,
value=None,
identity=None,
query_pos=None,
key_pos=None,
attn_mask=None,
key_padding_mask=None,
**kwargs):
"""Rewrite `forward` of MultiheadAttention used in vision_transformer for
ncnn backend.
Args:
query (Tensor): The input query with shape [num_queries, bs,
embed_dims] if self.batch_first is False, else
[bs, num_queries embed_dims].
key (Tensor): The key tensor with shape [num_keys, bs,
embed_dims] if self.batch_first is False, else
[bs, num_keys, embed_dims] .
If None, the ``query`` will be used. Defaults to None.
value (Tensor): The value tensor with same shape as `key`.
Same in `nn.MultiheadAttention.forward`. Defaults to None.
If None, the `key` will be used.
identity (Tensor): This tensor, with the same shape as x,
will be used for the identity link.
If None, `x` will be used. Defaults to None.
query_pos (Tensor): The positional encoding for query, with
the same shape as `x`. If not None, it will
be added to `x` before forward function. Defaults to None.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`. Defaults to None. If not None, it will
be added to `key` before forward function. If None, and
`query_pos` has the same shape as `key`, then `query_pos`
will be used for `key_pos`. Defaults to None.
attn_mask (Tensor): ByteTensor mask with shape [num_queries,
num_keys]. Same in `nn.MultiheadAttention.forward`.
Defaults to None.
key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
Defaults to None.
Returns:
Tensor: forwarded results with shape
[bs, num_queries embed_dims].
"""
if key is None:
key = query
if value is None:
value = key
if identity is None:
identity = query
if key_pos is None:
if query_pos is not None:
# use query_pos if key_pos is not available
if query_pos.shape == key.shape:
key_pos = query_pos
if query_pos is not None:
query = query + query_pos
if key_pos is not None:
key = key + key_pos
assert query is key and key is value, 'only support query==key==value'
assert self.batch_first, 'only support batch on first dim'
assert attn_mask is None
assert key_padding_mask is None
# split qkv weight and bias
qkv_weight = self.attn.in_proj_weight.data.reshape(3, -1, self.embed_dims)
q_weight = qkv_weight[0]
k_weight = qkv_weight[1]
v_weight = qkv_weight[2]
qkv_bias = self.attn.in_proj_bias.data.reshape(3, self.embed_dims)
q_bias = qkv_bias[0]
k_bias = qkv_bias[1]
v_bias = qkv_bias[2]
# out weight and bias
o_weight = self.attn.out_proj.weight.data
o_bias = self.attn.out_proj.bias.data
# export to MultiHeadAttention in ncnn
out = MultiHeadAttentionop.apply(query, key, value, q_weight, q_bias,
k_weight, k_bias, v_weight, v_bias,
o_weight, o_bias, self.embed_dims,
self.num_heads)
return identity + self.dropout_layer(self.proj_drop(out)) | Rewrite `forward` of MultiheadAttention used in vision_transformer for ncnn backend. Args: query (Tensor): The input query with shape [num_queries, bs, embed_dims] if self.batch_first is False, else [bs, num_queries embed_dims]. key (Tensor): The key tensor with shape [num_keys, bs, embed_dims] if self.batch_first is False, else [bs, num_keys, embed_dims] . If None, the ``query`` will be used. Defaults to None. value (Tensor): The value tensor with same shape as `key`. Same in `nn.MultiheadAttention.forward`. Defaults to None. If None, the `key` will be used. identity (Tensor): This tensor, with the same shape as x, will be used for the identity link. If None, `x` will be used. Defaults to None. query_pos (Tensor): The positional encoding for query, with the same shape as `x`. If not None, it will be added to `x` before forward function. Defaults to None. key_pos (Tensor): The positional encoding for `key`, with the same shape as `key`. Defaults to None. If not None, it will be added to `key` before forward function. If None, and `query_pos` has the same shape as `key`, then `query_pos` will be used for `key_pos`. Defaults to None. attn_mask (Tensor): ByteTensor mask with shape [num_queries, num_keys]. Same in `nn.MultiheadAttention.forward`. Defaults to None. key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. Defaults to None. Returns: Tensor: forwarded results with shape [bs, num_queries embed_dims]. |
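The packed-projection split used above can be sanity-checked on a plain nn.MultiheadAttention; embed_dims=8 and num_heads=2 are arbitrary values chosen for this sketch:
import torch.nn as nn

embed_dims, num_heads = 8, 2
attn = nn.MultiheadAttention(embed_dims, num_heads, batch_first=True)
qkv_weight = attn.in_proj_weight.data.reshape(3, -1, embed_dims)
qkv_bias = attn.in_proj_bias.data.reshape(3, embed_dims)
assert qkv_weight.shape == (3, embed_dims, embed_dims)
assert attn.out_proj.weight.shape == (embed_dims, embed_dims)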
188,625 | from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend
The provided code snippet includes necessary dependencies for implementing the `patch_embed__forward__ncnn` function. Write a Python function `def patch_embed__forward__ncnn(self, x)` to solve the following problem:
Rewrite `forward` of PatchEmbed for ncnn backend. Args: x (Tensor): Has shape (B, C, H, W). In most case, C is 3. Returns: tuple: Contains merged results and its spatial shape. - x (Tensor): Has shape (B, out_h * out_w, embed_dims) - out_size (tuple[int]): Spatial shape of x, arrange as (out_h, out_w).
Here is the function:
def patch_embed__forward__ncnn(self, x):
"""Rewrite `forward` of PatchEmbed for ncnn backend.
Args:
x (Tensor): Has shape (B, C, H, W). In most case, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, out_h * out_w, embed_dims)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_h, out_w).
"""
if self.adaptive_padding:
x = self.adaptive_padding(x)
x = self.projection(x)
x_shape = x.shape
out_size = (x_shape[2], x_shape[3])
x = x.reshape((x_shape[0], x_shape[1], -1)).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x, out_size | Rewrite `forward` of PatchEmbed for ncnn backend. Args: x (Tensor): Has shape (B, C, H, W). In most case, C is 3. Returns: tuple: Contains merged results and its spatial shape. - x (Tensor): Has shape (B, out_h * out_w, embed_dims) - out_size (tuple[int]): Spatial shape of x, arrange as (out_h, out_w). |
188,626 |
The provided code snippet includes necessary dependencies for implementing the `roi_align_rotated_default` function. Write a Python function `def roi_align_rotated_default(g, input: Tensor, rois: Tensor, output_size: List[int], spatial_scale: float, sampling_ratio: int, aligned: bool, clockwise: bool)` to solve the following problem:
Rewrite symbolic function for default backend. Replace onnx::RoIAlignRotated with mmdeploy::MMCVRoIAlignRotated. Args: ctx (ContextCaller): The context with additional information. g (Graph): The traced onnx graph. input (Tensor): Input tensor, 4-D feature map of shape (N, C, H, W). rois (Tensor): Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. output_size(List[int]): Output size of height and width. spatial_scale (float): sampling_ratio (int): Number of inputs samples to take for each output sample. 0 to take samples densely for current models. aligned (bool): With `aligned=True`, we first appropriately scale the ROI and then shift it by -0.5 prior to calling roi_align. This produces the correct neighbors; clockwise (bool): If True, the angle in each proposal follows a clockwise fashion in image space, otherwise, the angle is counterclockwise. Default: False. Returns: MMCVRoiAlign op for onnx.
Here is the function:
def roi_align_rotated_default(g, input: Tensor, rois: Tensor,
output_size: List[int], spatial_scale: float,
sampling_ratio: int, aligned: bool,
clockwise: bool):
"""Rewrite symbolic function for default backend.
Replace onnx::RoIAlignRotated with mmdeploy::MMCVRoIAlignRotated.
Args:
ctx (ContextCaller): The context with additional information.
g (Graph): The traced onnx graph.
input (Tensor): Input tensor, 4-D feature map of shape (N, C, H, W).
rois (Tensor): Bx5 boxes. First column is the index into N. The other
4 columns are xyxy.
output_size(List[int]): Output size of height and width.
spatial_scale (float):
sampling_ratio (int): Number of inputs samples to take for each
output sample. 0 to take samples densely for current models.
aligned (bool): With `aligned=True`, we first appropriately scale
the ROI and then shift it by -0.5 prior to calling roi_align.
This produces the correct neighbors;
clockwise (bool): If True, the angle in each proposal follows a
clockwise fashion in image space, otherwise, the angle is
counterclockwise. Default: False.
Returns:
MMCVRoiAlign op for onnx.
"""
return g.op(
'mmdeploy::MMCVRoIAlignRotated',
input,
rois,
output_height_i=output_size[0],
output_width_i=output_size[1],
spatial_scale_f=spatial_scale,
sampling_ratio_i=sampling_ratio,
aligned_i=aligned,
clockwise_i=clockwise) | Rewrite symbolic function for default backend. Replace onnx::RoIAlignRotated with mmdeploy::MMCVRoIAlignRotated. Args: ctx (ContextCaller): The context with additional information. g (Graph): The traced onnx graph. input (Tensor): Input tensor, 4-D feature map of shape (N, C, H, W). rois (Tensor): Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. output_size(List[int]): Output size of height and width. spatial_scale (float): sampling_ratio (int): Number of inputs samples to take for each output sample. 0 to take samples densely for current models. aligned (bool): With `aligned=True`, we first appropriately scale the ROI and then shift it by -0.5 prior to calling roi_align. This produces the correct neighbors; clockwise (bool): If True, the angle in each proposal follows a clockwise fashion in image space, otherwise, the angle is counterclockwise. Default: False. Returns: MMCVRoiAlign op for onnx. |
188,627 |
The provided code snippet includes necessary dependencies for implementing the `roi_align_default` function. Write a Python function `def roi_align_default(g, input: Tensor, rois: Tensor, output_size: List[int], spatial_scale: float, sampling_ratio: int, pool_mode: str, aligned: bool)` to solve the following problem:
Rewrite symbolic function for default backend. Replace onnx::RoiAlign with mmcv::MMCVRoiAlign for PPLNN. For ONNXRuntime, align operation get done outside the inference engine for opset versions lower than 16. By default, onnx::RoiAlign get replaced to mmdeploy::MMCVRoiAlign. Args: ctx (ContextCaller): The context with additional information. g (Graph): The traced onnx graph. input (Tensor): Input tensor, 4-D feature map of shape (N, C, H, W). rois (Tensor): Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. output_size(List[int]): Output size of height and width. spatial_scale (float): sampling_ratio (int): Number of inputs samples to take for each output sample. 0 to take samples densely for current models. pool_mode (str): Pooling mode in each bin, could be 'avg' or 'max'. aligned (bool): With `aligned=True`, we first appropriately scale the ROI and then shift it by -0.5 prior to calling roi_align. This produces the correct neighbors; Returns: MMCVRoiAlign op for onnx.
Here is the function:
def roi_align_default(g, input: Tensor, rois: Tensor, output_size: List[int],
spatial_scale: float, sampling_ratio: int,
pool_mode: str, aligned: bool):
"""Rewrite symbolic function for default backend.
Replace onnx::RoiAlign with mmcv::MMCVRoiAlign for PPLNN. For ONNXRuntime,
align operation get done outside the inference engine for opset versions
lower than 16. By default, onnx::RoiAlign get replaced to
mmdeploy::MMCVRoiAlign.
Args:
ctx (ContextCaller): The context with additional information.
g (Graph): The traced onnx graph.
input (Tensor): Input tensor, 4-D feature map of shape (N, C, H, W).
rois (Tensor): Bx5 boxes. First column is the index into N. The other
4 columns are xyxy.
output_size(List[int]): Output size of height and width.
spatial_scale (float):
sampling_ratio (int): Number of inputs samples to take for each
output sample. 0 to take samples densely for current models.
pool_mode (str): Pooling mode in each bin, could be 'avg' or 'max'.
aligned (bool): With `aligned=True`, we first appropriately scale
the ROI and then shift it by -0.5 prior to calling roi_align.
This produces the correct neighbors;
Returns:
MMCVRoiAlign op for onnx.
"""
ctx = SYMBOLIC_REWRITER.get_context()
backend = get_backend(ctx.cfg)
if backend == Backend.PPLNN or backend == Backend.TENSORRT:
domain = 'mmcv'
return g.op(
f'{domain}::MMCVRoiAlign',
input,
rois,
output_height_i=output_size[0],
output_width_i=output_size[1],
spatial_scale_f=spatial_scale,
sampling_ratio_i=sampling_ratio,
mode_s=pool_mode,
aligned_i=aligned)
else:
from torch.onnx.symbolic_opset9 import _cast_Long
from torch.onnx.symbolic_opset11 import add, select
ir_cfg = get_ir_config(ctx.cfg)
opset_version = ir_cfg.get('opset_version', 11)
if opset_version < 13:
batch_indices = _cast_Long(
g,
g.op(
'Squeeze',
select(
g, rois, 1,
g.op(
'Constant',
value_t=torch.tensor([0], dtype=torch.long))),
axes_i=[1]), False)
else:
axes = g.op(
'Constant', value_t=torch.tensor([1], dtype=torch.long))
batch_indices = _cast_Long(
g,
g.op(
'Squeeze',
select(
g, rois, 1,
g.op(
'Constant',
value_t=torch.tensor([0], dtype=torch.long))),
axes), False)
rois = select(
g, rois, 1,
g.op(
'Constant',
value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
if opset_version < 16:
# preprocess rois to make compatible with opset 16-
# as for opset 16+, `aligned` get implemented inside onnxruntime.
if aligned is True:
rois = add(
g, rois,
g.op(
'Constant',
value_t=torch.tensor([-0.5 / spatial_scale],
dtype=torch.float)))
return g.op(
'RoiAlign',
input,
rois,
batch_indices,
output_height_i=output_size[0],
output_width_i=output_size[1],
spatial_scale_f=spatial_scale,
sampling_ratio_i=sampling_ratio,
mode_s=pool_mode)
else:
if aligned:
coordinate_transformation_mode = 'half_pixel'
else:
coordinate_transformation_mode = 'output_half_pixel'
return g.op(
'RoiAlign',
input,
rois,
batch_indices,
output_height_i=output_size[0],
output_width_i=output_size[1],
spatial_scale_f=spatial_scale,
sampling_ratio_i=sampling_ratio,
mode_s=pool_mode,
coordinate_transformation_mode_s=coordinate_transformation_mode
) | Rewrite symbolic function for default backend. Replace onnx::RoiAlign with mmcv::MMCVRoiAlign for PPLNN. For ONNXRuntime, align operation get done outside the inference engine for opset versions lower than 16. By default, onnx::RoiAlign get replaced to mmdeploy::MMCVRoiAlign. Args: ctx (ContextCaller): The context with additional information. g (Graph): The traced onnx graph. input (Tensor): Input tensor, 4-D feature map of shape (N, C, H, W). rois (Tensor): Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. output_size(List[int]): Output size of height and width. spatial_scale (float): sampling_ratio (int): Number of inputs samples to take for each output sample. 0 to take samples densely for current models. pool_mode (str): Pooling mode in each bin, could be 'avg' or 'max'. aligned (bool): With `aligned=True`, we first appropriately scale the ROI and then shift it by -0.5 prior to calling roi_align. This produces the correct neighbors; Returns: MMCVRoiAlign op for onnx. |
188,628 | import torch
from packaging import version
from torch import Tensor
from torch.onnx import symbolic_helper as sym_help
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import IR, is_dynamic_batch
from mmdeploy.utils.constants import Backend
from .nms_match import multiclass_nms_match
from .nms_rotated import multiclass_nms_rotated
def _multiclass_nms_single(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1,
output_index: bool = False):
"""Create a dummy onnx::NonMaxSuppression op while exporting to ONNX.
Single batch nms could be optimized.
"""
if version.parse(torch.__version__) < version.parse('1.13.0'):
max_output_boxes_per_class = torch.LongTensor(
[max_output_boxes_per_class])
iou_threshold = torch.tensor([iou_threshold], dtype=torch.float32)
score_threshold = torch.tensor([score_threshold], dtype=torch.float32)
# pre topk
pre_topk_inds = None
if pre_top_k > 0:
max_scores, _ = scores.max(-1)
_, topk_inds = max_scores.squeeze(0).topk(pre_top_k)
boxes = boxes[:, topk_inds, :]
scores = scores[:, topk_inds, :]
pre_topk_inds = topk_inds
scores = scores.permute(0, 2, 1)
selected_indices = ONNXNMSop.apply(boxes, scores,
max_output_boxes_per_class,
iou_threshold, score_threshold)
cls_inds = selected_indices[:, 1]
box_inds = selected_indices[:, 2]
scores = scores[:, cls_inds, box_inds].unsqueeze(2)
boxes = boxes[:, box_inds, ...]
dets = torch.cat([boxes, scores], dim=2)
labels = cls_inds.unsqueeze(0)
# pad
dets = torch.cat((dets, dets.new_zeros((1, 1, 5))), 1)
labels = torch.cat((labels, labels.new_zeros((1, 1))), 1)
# topk or sort
is_use_topk = keep_top_k > 0 and \
(torch.onnx.is_in_onnx_export() or keep_top_k < dets.shape[1])
if is_use_topk:
_, topk_inds = dets[:, :, -1].topk(keep_top_k, dim=1)
else:
_, topk_inds = dets[:, :, -1].sort(dim=1, descending=True)
topk_inds = topk_inds.squeeze(0)
dets = dets[:, topk_inds, ...]
labels = labels[:, topk_inds, ...]
if output_index:
bbox_index = box_inds.unsqueeze(0)
if pre_top_k > 0:
bbox_index = pre_topk_inds[None, box_inds]
# pad index to keep same dim as dets and labels
bbox_index = torch.cat([bbox_index, -bbox_index.new_ones((1, 1))], 1)
if keep_top_k > 0:
bbox_index = bbox_index[:, topk_inds]
return dets, labels, bbox_index
else:
return dets, labels
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdeploy.mmcv.ops.nms._multiclass_nms')
The provided code snippet includes necessary dependencies for implementing the `multiclass_nms__default` function. Write a Python function `def multiclass_nms__default(boxes: Tensor, scores: Tensor, max_output_boxes_per_class: int = 1000, iou_threshold: float = 0.5, score_threshold: float = 0.05, pre_top_k: int = -1, keep_top_k: int = -1, output_index: bool = False)` to solve the following problem:
Create a dummy onnx::NonMaxSuppression op while exporting to ONNX. This function helps exporting to onnx with batch and multiclass NMS op. It only supports class-agnostic detection results. That is, the scores is of shape (N, num_bboxes, num_classes) and the boxes is of shape (N, num_boxes, 4). Args: boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. scores (Tensor): The detection scores of shape [N, num_boxes, num_classes]. max_output_boxes_per_class (int): Maximum number of output boxes per class of nms. Defaults to 1000. iou_threshold (float): IOU threshold of nms. Defaults to 0.5. score_threshold (float): score threshold of nms. Defaults to 0.05. pre_top_k (int): Number of top K boxes to keep before nms. Defaults to -1. keep_top_k (int): Number of top K boxes to keep after nms. Defaults to -1. Returns: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det].
Here is the function:
def multiclass_nms__default(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1,
output_index: bool = False):
"""Create a dummy onnx::NonMaxSuppression op while exporting to ONNX.
This function helps exporting to onnx with batch and multiclass NMS op.
It only supports class-agnostic detection results. That is, the scores
is of shape (N, num_bboxes, num_classes) and the boxes is of shape
(N, num_boxes, 4).
Args:
boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
scores (Tensor): The detection scores of shape
[N, num_boxes, num_classes].
max_output_boxes_per_class (int): Maximum number of output
boxes per class of nms. Defaults to 1000.
iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
score_threshold (float): score threshold of nms.
Defaults to 0.05.
pre_top_k (int): Number of top K boxes to keep before nms.
Defaults to -1.
keep_top_k (int): Number of top K boxes to keep after nms.
Defaults to -1.
Returns:
tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5]
and `labels` of shape [N, num_det].
"""
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
batch_size = boxes.size(0)
if not is_dynamic_batch(deploy_cfg) and batch_size == 1:
return _multiclass_nms_single(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k,
output_index=output_index)
else:
return ctx.origin_func(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k,
output_index=output_index) | Create a dummy onnx::NonMaxSuppression op while exporting to ONNX. This function helps exporting to onnx with batch and multiclass NMS op. It only supports class-agnostic detection results. That is, the scores is of shape (N, num_bboxes, num_classes) and the boxes is of shape (N, num_boxes, 4). Args: boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. scores (Tensor): The detection scores of shape [N, num_boxes, num_classes]. max_output_boxes_per_class (int): Maximum number of output boxes per class of nms. Defaults to 1000. iou_threshold (float): IOU threshold of nms. Defaults to 0.5. score_threshold (float): score threshold of nms. Defaults to 0.05. pre_top_k (int): Number of top K boxes to keep before nms. Defaults to -1. keep_top_k (int): Number of top K boxes to keep after nms. Defaults to -1. Returns: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. |
188,629 | import torch
from packaging import version
from torch import Tensor
from torch.onnx import symbolic_helper as sym_help
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import IR, is_dynamic_batch
from mmdeploy.utils.constants import Backend
from .nms_match import multiclass_nms_match
from .nms_rotated import multiclass_nms_rotated
class TRTBatchedNMSop(torch.autograd.Function):
"""Create mmdeploy::TRTBatchedNMS op for TensorRT backend.
NMS in ONNX supports dynamic outputs. This class helps replace
onnx::NonMaxSuppression with mmdeploy::TRTBatchedNMS.
"""
    @staticmethod
    def forward(ctx,
boxes: Tensor,
scores: Tensor,
num_classes: int,
pre_topk: int,
after_topk: int,
iou_threshold: float,
score_threshold: float,
background_label_id: int = -1,
return_index: bool = False):
"""Forward of batched nms.
Args:
ctx (Context): The context with meta information.
boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
scores (Tensor): The detection scores of shape
[N, num_boxes, num_classes].
            num_classes (int): The number of classes in the network.
pre_topk (int): The number of bounding boxes to be fed into
the NMS step.
after_topk (int): The number of total bounding boxes to be kept
per-image after the NMS step. Should be less than or equal
to the pre_topk value.
iou_threshold (float): IOU threshold of nms.
score_threshold (float): score threshold of nms.
background_label_id (int): The label ID for the background class.
If there is no background class, set it to -1.
Returns:
Tensor: Selected indices of boxes. 2-D tensor of shape
(num_selected_indices, 3) with each row of
[batch_index, class_index, box_index]. Note it is generated
randomly to make it exportable to onnx.
"""
batch_size, num_boxes, num_classes = scores.shape
out_boxes = min(num_boxes, after_topk)
ret = (torch.rand(batch_size, out_boxes, 5).to(scores.device),
torch.randint(0, num_classes,
(batch_size, out_boxes)).to(scores.device))
if return_index:
ret = ret + (torch.randint(
0, out_boxes, (batch_size, out_boxes)).to(scores.device), )
return ret
    @staticmethod
    def symbolic(g,
boxes: Tensor,
scores: Tensor,
num_classes: int,
pre_topk: int,
after_topk: int,
iou_threshold: float,
score_threshold: float,
background_label_id: int = -1,
return_index: bool = False):
"""Symbolic function for mmdeploy::TRTBatchedNMS."""
return g.op(
'mmdeploy::TRTBatchedNMS',
boxes,
scores,
num_classes_i=num_classes,
background_label_id_i=background_label_id,
iou_threshold_f=iou_threshold,
score_threshold_f=score_threshold,
topk_i=pre_topk,
keep_topk_i=after_topk,
is_normalized_i=False,
clip_boxes_i=False,
return_index_i=return_index,
outputs=3 if return_index else 2)
The provided code snippet includes necessary dependencies for implementing the `multiclass_nms_static` function. Write a Python function `def multiclass_nms_static(boxes: Tensor, scores: Tensor, max_output_boxes_per_class: int = 1000, iou_threshold: float = 0.5, score_threshold: float = 0.05, pre_top_k: int = -1, keep_top_k: int = -1, output_index: bool = False)` to solve the following problem:
Wrapper for `multiclass_nms` with TensorRT. Args: ctx (ContextCaller): The context with additional information. boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. scores (Tensor): The detection scores of shape [N, num_boxes, num_classes]. max_output_boxes_per_class (int): Maximum number of output boxes per class of nms. Defaults to 1000. iou_threshold (float): IOU threshold of nms. Defaults to 0.5. score_threshold (float): score threshold of nms. Defaults to 0.05. pre_top_k (int): Number of top K boxes to keep before nms. Defaults to -1. keep_top_k (int): Number of top K boxes to keep after nms. Defaults to -1. Returns: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det].
Here is the function:
def multiclass_nms_static(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1,
output_index: bool = False):
"""Wrapper for `multiclass_nms` with TensorRT.
Args:
ctx (ContextCaller): The context with additional information.
boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
scores (Tensor): The detection scores of shape
[N, num_boxes, num_classes].
max_output_boxes_per_class (int): Maximum number of output
boxes per class of nms. Defaults to 1000.
iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
score_threshold (float): score threshold of nms.
Defaults to 0.05.
pre_top_k (int): Number of top K boxes to keep before nms.
Defaults to -1.
keep_top_k (int): Number of top K boxes to keep after nms.
Defaults to -1.
Returns:
tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5]
and `labels` of shape [N, num_det].
"""
boxes = boxes if boxes.dim() == 4 else boxes.unsqueeze(2)
keep_top_k = max_output_boxes_per_class if keep_top_k < 0 else min(
max_output_boxes_per_class, keep_top_k)
nms_output = TRTBatchedNMSop.apply(
boxes,
scores,
int(scores.shape[-1]),
pre_top_k,
keep_top_k,
iou_threshold,
score_threshold,
-1,
output_index,
)
dets = nms_output[0]
labels = nms_output[1]
box_index = None if len(nms_output) <= 2 else nms_output[2]
# retain shape info
batch_size = boxes.size(0)
dets_shape = dets.shape
label_shape = labels.shape
dets = dets.reshape([batch_size, *dets_shape[1:]])
labels = labels.reshape([batch_size, *label_shape[1:]])
if output_index:
return dets, labels, box_index
return dets, labels | Wrapper for `multiclass_nms` with TensorRT. Args: ctx (ContextCaller): The context with additional information. boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. scores (Tensor): The detection scores of shape [N, num_boxes, num_classes]. max_output_boxes_per_class (int): Maximum number of output boxes per class of nms. Defaults to 1000. iou_threshold (float): IOU threshold of nms. Defaults to 0.5. score_threshold (float): score threshold of nms. Defaults to 0.05. pre_top_k (int): Number of top K boxes to keep before nms. Defaults to -1. keep_top_k (int): Number of top K boxes to keep after nms. Defaults to -1. Returns: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. |
188,630 | import torch
from packaging import version
from torch import Tensor
from torch.onnx import symbolic_helper as sym_help
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import IR, is_dynamic_batch
from mmdeploy.utils.constants import Backend
from .nms_match import multiclass_nms_match
from .nms_rotated import multiclass_nms_rotated
The provided code snippet includes necessary dependencies for implementing the `multiclass_nms__coreml` function. Write a Python function `def multiclass_nms__coreml(boxes: Tensor, scores: Tensor, max_output_boxes_per_class: int = 1000, iou_threshold: float = 0.5, score_threshold: float = 0.05, pre_top_k: int = -1, keep_top_k: int = -1, output_index: bool = False)` to solve the following problem:
rewrite for coreml batched nms. Use coreml_nms from custom ops.
Here is the function:
def multiclass_nms__coreml(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1,
output_index: bool = False):
"""rewrite for coreml batched nms.
Use coreml_nms from custom ops.
"""
# load custom nms
from mmdeploy.backend.torchscript import get_ops_path, ops_available
assert ops_available(), 'coreml require custom torchscript ops support.'
torch.ops.load_library(get_ops_path())
try:
coreml_nms = torch.ops.mmdeploy.coreml_nms
except Exception:
raise Exception(
'Can not use coreml_nms. Please build torchscript custom ops.')
batch_size = scores.shape[0]
assert batch_size == 1, 'batched nms is not supported for now.'
# pre-topk
if pre_top_k > 0:
max_scores, _ = scores.max(-1)
_, topk_inds = max_scores.topk(pre_top_k)
boxes = boxes[:, topk_inds.squeeze(), ...]
scores = scores[:, topk_inds.squeeze(), ...]
def _xyxy2xywh(boxes):
xy0 = boxes[..., :2]
xy1 = boxes[..., 2:]
xy = (xy0 + xy1) / 2
wh = xy1 - xy0
return torch.cat([xy, wh], dim=-1)
def _xywh2xyxy(boxes):
xy = boxes[..., :2]
half_wh = boxes[..., 2:] / 2
return torch.cat([xy - half_wh, xy + half_wh], dim=-1)
boxes = _xyxy2xywh(boxes)
keep_top_k = keep_top_k if keep_top_k > 0 else max_output_boxes_per_class
boxes, scores, box_index, _ = coreml_nms(
boxes, scores, iou_threshold, score_threshold,
min(keep_top_k, max_output_boxes_per_class))
scores, labels = scores.max(-1)
boxes = _xywh2xyxy(boxes)
dets = torch.cat([boxes, scores.unsqueeze(-1)], dim=-1)
if output_index:
return dets, labels, box_index
return dets, labels | rewrite for coreml batched nms. Use coreml_nms from custom ops. |
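The xyxy/xywh helpers defined inside the rewrite are inverses of each other; a standalone round-trip check with arbitrary box values:
import torch

def xyxy2xywh(boxes):
    xy0, xy1 = boxes[..., :2], boxes[..., 2:]
    return torch.cat([(xy0 + xy1) / 2, xy1 - xy0], dim=-1)

def xywh2xyxy(boxes):
    xy, half_wh = boxes[..., :2], boxes[..., 2:] / 2
    return torch.cat([xy - half_wh, xy + half_wh], dim=-1)

boxes = torch.tensor([[10., 20., 30., 60.]])
assert torch.allclose(xywh2xyxy(xyxy2xywh(boxes)), boxes)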
188,631 | import torch
from packaging import version
from torch import Tensor
from torch.onnx import symbolic_helper as sym_help
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import IR, is_dynamic_batch
from mmdeploy.utils.constants import Backend
from .nms_match import multiclass_nms_match
from .nms_rotated import multiclass_nms_rotated
def _select_nms_index(scores: torch.Tensor,
boxes: torch.Tensor,
nms_index: torch.Tensor,
batch_size: int,
keep_top_k: int = -1,
pre_inds: torch.Tensor = None,
output_index: bool = False):
"""Transform NMS output.
Args:
scores (Tensor): The detection scores of shape
[N, num_classes, num_boxes].
boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
nms_index (Tensor): NMS output of bounding boxes indexing.
batch_size (int): Batch size of the input image.
keep_top_k (int): Number of top K boxes to keep after nms.
Defaults to -1.
pre_inds (Tensor): The pre-topk indices of boxes before nms.
Defaults to None.
        output_index (bool): Whether to return indices of original bboxes.
Defaults to False.
Returns:
tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5]
and `labels` of shape [N, num_det].
"""
batch_inds, cls_inds = nms_index[:, 0], nms_index[:, 1]
box_inds = nms_index[:, 2]
# index by nms output
scores = scores[batch_inds, cls_inds, box_inds].unsqueeze(1)
boxes = boxes[batch_inds, box_inds, ...]
dets = torch.cat([boxes, scores], dim=1)
# batch all
batched_dets = dets.unsqueeze(0).repeat(batch_size, 1, 1)
batch_template = torch.arange(
0, batch_size, dtype=batch_inds.dtype, device=batch_inds.device)
batched_dets = batched_dets.where(
(batch_inds == batch_template.unsqueeze(1)).unsqueeze(-1),
batched_dets.new_zeros(1))
batched_labels = cls_inds.unsqueeze(0).repeat(batch_size, 1)
batched_labels = batched_labels.where(
(batch_inds == batch_template.unsqueeze(1)),
batched_labels.new_ones(1) * -1)
N = batched_dets.shape[0]
# expand tensor to eliminate [0, ...] tensor
batched_dets = torch.cat((batched_dets, batched_dets.new_zeros((N, 1, 5))),
1)
batched_labels = torch.cat((batched_labels, batched_labels.new_zeros(
(N, 1))), 1)
if output_index and pre_inds is not None:
# batch all
pre_inds = pre_inds[batch_inds, box_inds]
pre_inds = pre_inds.unsqueeze(0).repeat(batch_size, 1)
pre_inds = pre_inds.where((batch_inds == batch_template.unsqueeze(1)),
pre_inds.new_zeros(1))
pre_inds = torch.cat((pre_inds, -pre_inds.new_ones((N, 1))), 1)
# sort
is_use_topk = keep_top_k > 0 and \
(torch.onnx.is_in_onnx_export() or keep_top_k < batched_dets.shape[1])
if is_use_topk:
_, topk_inds = batched_dets[:, :, -1].topk(keep_top_k, dim=1)
else:
_, topk_inds = batched_dets[:, :, -1].sort(dim=1, descending=True)
topk_batch_inds = torch.arange(
batch_size, dtype=topk_inds.dtype,
device=topk_inds.device).view(-1, 1)
batched_dets = batched_dets[topk_batch_inds, topk_inds, ...]
batched_labels = batched_labels[topk_batch_inds, topk_inds, ...]
if output_index:
if pre_inds is not None:
topk_inds = pre_inds[topk_batch_inds, topk_inds, ...]
return batched_dets, batched_labels, topk_inds
# slice and recover the tensor
return batched_dets, batched_labels
The provided code snippet includes necessary dependencies for implementing the `multiclass_nms__torchscript` function. Write a Python function `def multiclass_nms__torchscript(boxes: Tensor, scores: Tensor, max_output_boxes_per_class: int = 1000, iou_threshold: float = 0.5, score_threshold: float = 0.05, pre_top_k: int = -1, keep_top_k: int = -1, output_index=False)` to solve the following problem:
rewrite for torchscript batched nms. Use batched_nms from torchvision instead of custom nms.
Here is the function:
def multiclass_nms__torchscript(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1,
output_index=False):
"""rewrite for torchscript batched nms.
Use batched_nms from torchvision instead of custom nms.
"""
# TODO: simplify inference for non-batch model
from torchvision.ops import batched_nms
batch_size = scores.shape[0]
num_boxes = scores.shape[1]
num_classes = scores.shape[2]
box_per_cls = len(boxes.shape) == 4
scores = torch.where(scores > score_threshold, scores, scores.new_zeros(1))
pre_topk_inds = None
# pre-topk
if pre_top_k > 0:
max_scores, _ = scores.max(-1)
_, topk_inds = max_scores.topk(pre_top_k)
pre_topk_inds = topk_inds
batch_inds = torch.arange(batch_size).view(-1, 1).long()
boxes = boxes[batch_inds, topk_inds, ...]
scores = scores[batch_inds, topk_inds, :]
num_boxes = scores.shape[1]
idxs = torch.arange(0, batch_size, device=scores.device).unsqueeze(1)
idxs = idxs.repeat(1, num_boxes).view(-1)
keeps = [None] * num_classes
for cls_id in range(num_classes):
box = boxes if not box_per_cls else boxes[:, :, cls_id, :]
score = scores[:, :, cls_id]
box = box.view(-1, 4)
score = score.view(-1)
box_keep = batched_nms(box, score, idxs, iou_threshold=iou_threshold)
box_keep = box_keep[:max_output_boxes_per_class * batch_size]
batch_keep = idxs[box_keep]
cls_keep = torch.ones_like(box_keep) * cls_id
box_keep = box_keep - batch_keep * num_boxes
keeps[cls_id] = torch.stack([batch_keep, cls_keep, box_keep], dim=1)
keeps = torch.cat(keeps)
scores = scores.permute(0, 2, 1)
return _select_nms_index(
scores,
boxes,
keeps,
batch_size,
keep_top_k=keep_top_k,
pre_inds=pre_topk_inds,
output_index=output_index) | rewrite for torchscript batched nms. Use batched_nms from torchvision instead of custom nms. |
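A minimal usage sketch (illustrative only, not part of the dataset row above; the shapes and thresholds are assumptions): with torchvision installed and the two functions above defined in the current scope, the torchscript rewrite can be exercised eagerly on random inputs shaped as the docstring describes.

import torch

batch_size, num_boxes, num_classes = 2, 100, 4
# build well-formed axis-aligned boxes: (x1, y1, x2, y2) with x2 >= x1 and y2 >= y1
xy1 = torch.rand(batch_size, num_boxes, 2) * 50
wh = torch.rand(batch_size, num_boxes, 2) * 50
boxes = torch.cat([xy1, xy1 + wh], dim=-1)                # [N, num_boxes, 4]
scores = torch.rand(batch_size, num_boxes, num_classes)   # [N, num_boxes, num_classes]
dets, labels = multiclass_nms__torchscript(
    boxes, scores, max_output_boxes_per_class=200,
    iou_threshold=0.5, score_threshold=0.05, keep_top_k=20)
# dets: [batch_size, num_det, 5] as (x1, y1, x2, y2, score); labels: [batch_size, num_det]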
188,632 | import torch
from packaging import version
from torch import Tensor
from torch.onnx import symbolic_helper as sym_help
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import IR, is_dynamic_batch
from mmdeploy.utils.constants import Backend
from .nms_match import multiclass_nms_match
from .nms_rotated import multiclass_nms_rotated
class AscendBatchNMSOp(torch.autograd.Function):
def forward(ctx, bboxes: torch.Tensor, scores: torch.Tensor,
score_threshold: float, iou_threshold: float,
max_size_per_class: int, max_total_size: int):
"""Dummy nms forward
Args:
            bboxes (torch.Tensor): boxes in shape (batch, N, C, 4).
scores (torch.Tensor): scores in shape (batch, N, C).
score_threshold (float): the score threshold.
iou_threshold (float): the iou threshold.
max_size_per_class (int): max size per class.
max_total_size (int): max total size.
Returns:
(torch.Tensor): boxes,(1, N, 4)
(torch.Tensor): scores,(1, N)
(torch.Tensor): classes,(1, N)
(torch.Tensor): num_dets,(1,)
"""
# Python implementation for onnx export
nmsed_boxes = bboxes[:, :max_total_size, 0, :]
nmsed_scores = scores[:, :max_total_size, 0]
nmsed_classes = torch.arange(max_total_size, dtype=torch.long)
nmsed_num = torch.Tensor([max_total_size])
return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num
def symbolic(g, bboxes, scores, score_thr, iou_thr, max_size_p_class,
max_t_size):
nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num = g.op(
'mmdeploy::BatchMultiClassNMS',
bboxes,
scores,
score_threshold_f=score_thr,
iou_threshold_f=iou_thr,
max_size_per_class_i=max_size_p_class,
max_total_size_i=max_t_size,
outputs=4)
return nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_num
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdeploy.mmcv.ops.nms._multiclass_nms', backend='ascend')
The provided code snippet includes necessary dependencies for implementing the `multiclass_nms__ascend` function. Write a Python function `def multiclass_nms__ascend(boxes: Tensor, scores: Tensor, max_output_boxes_per_class: int = 1000, iou_threshold: float = 0.5, score_threshold: float = 0.05, pre_top_k: int = -1, keep_top_k: int = -1, output_index: bool = False)` to solve the following problem:
Wrapper for `multiclass_nms` with Ascend. Args: ctx (ContextCaller): The context with additional information. boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. scores (Tensor): The detection scores of shape [N, num_boxes, num_classes]. max_output_boxes_per_class (int): Maximum number of output boxes per class of nms. Defaults to 1000. iou_threshold (float): IOU threshold of nms. Defaults to 0.5. score_threshold (float): score threshold of nms. Defaults to 0.05. pre_top_k (int): Number of top K boxes to keep before nms. Defaults to -1. keep_top_k (int): Number of top K boxes to keep after nms. Defaults to -1. Returns: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det].
Here is the function:
def multiclass_nms__ascend(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1,
output_index: bool = False):
"""Wrapper for `multiclass_nms` with Ascend.
Args:
ctx (ContextCaller): The context with additional information.
boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
scores (Tensor): The detection scores of shape
[N, num_boxes, num_classes].
max_output_boxes_per_class (int): Maximum number of output
boxes per class of nms. Defaults to 1000.
iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
score_threshold (float): score threshold of nms.
Defaults to 0.05.
pre_top_k (int): Number of top K boxes to keep before nms.
Defaults to -1.
keep_top_k (int): Number of top K boxes to keep after nms.
Defaults to -1.
Returns:
tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5]
and `labels` of shape [N, num_det].
"""
assert not output_index, 'output_index is not supported on this backend.'
boxes = boxes if boxes.dim() == 4 else boxes.unsqueeze(2)
keep_top_k = max_output_boxes_per_class if keep_top_k < 0 else min(
max_output_boxes_per_class, keep_top_k)
nmsed_boxes, nmsed_scores, nmsed_classes, _ = AscendBatchNMSOp.apply(
boxes, scores, score_threshold, iou_threshold, keep_top_k, keep_top_k)
dets = torch.cat([nmsed_boxes, nmsed_scores.unsqueeze(2)], dim=-1)
return dets, nmsed_classes.int() | Wrapper for `multiclass_nms` with Ascend. Args: ctx (ContextCaller): The context with additional information. boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. scores (Tensor): The detection scores of shape [N, num_boxes, num_classes]. max_output_boxes_per_class (int): Maximum number of output boxes per class of nms. Defaults to 1000. iou_threshold (float): IOU threshold of nms. Defaults to 0.5. score_threshold (float): score threshold of nms. Defaults to 0.05. pre_top_k (int): Number of top K boxes to keep before nms. Defaults to -1. keep_top_k (int): Number of top K boxes to keep after nms. Defaults to -1. Returns: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. |
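A brief illustrative sketch (an assumption, not part of the row above): the Ascend rewrite is a plain Python function, so it can be called eagerly; outside of ONNX export the custom AscendBatchNMSOp only runs its dummy forward, so the returned values are placeholders, but the call signature and the dets shape match the documented contract.

import torch

boxes = torch.rand(1, 50, 4)    # [N, num_boxes, 4]
scores = torch.rand(1, 50, 3)   # [N, num_boxes, num_classes]
dets, labels = multiclass_nms__ascend(
    boxes, scores, max_output_boxes_per_class=20, keep_top_k=10)
# dets: [1, 10, 5]; labels here are placeholder class ids produced by the dummy forward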
188,633 | import torch
from torch import Tensor
import mmdeploy
from mmdeploy.core import FUNCTION_REWRITER, mark
class TRTBatchedRotatedNMSop(torch.autograd.Function):
"""Create mmdeploy::TRTBatchedRotatedNMSop op for TensorRT backend.
NMS in ONNX supports dynamic outputs. This class helps replace
onnx::NonMaxSuppression with mmdeploy::TRTBatchedRotatedNMSop.
"""
def forward(ctx,
boxes: Tensor,
scores: Tensor,
num_classes: int,
pre_topk: int,
after_topk: int,
iou_threshold: float,
score_threshold: float,
background_label_id: int = -1):
"""Forward of batched rotated nms.
Args:
ctx (Context): The context with meta information.
boxes (Tensor): The bounding boxes of shape [N, num_boxes, 5].
scores (Tensor): The detection scores of shape
[N, num_boxes, num_classes].
            num_classes (int): The number of classes in the network.
pre_topk (int): The number of bounding boxes to be fed into
the NMS step.
after_topk (int): The number of total bounding boxes to be kept
per-image after the NMS step. Should be less than or equal
to the pre_topk value.
iou_threshold (float): IOU threshold of nms.
score_threshold (float): score threshold of nms.
background_label_id (int): The label ID for the background class.
If there is no background class, set it to -1.
Returns:
dets (Tensor): Bboxes and scores of the rotated nms results.
labels (Tensor): Class id of the rotated nms results.
"""
batch_size, num_boxes, num_classes = scores.shape
out_boxes = min(num_boxes, after_topk)
return torch.rand(batch_size, out_boxes,
6).to(scores.device), torch.randint(
0, num_classes,
(batch_size, out_boxes)).to(scores.device)
def symbolic(g,
boxes: Tensor,
scores: Tensor,
num_classes: int,
pre_topk: int,
after_topk: int,
iou_threshold: float,
score_threshold: float,
background_label_id: int = -1):
"""Symbolic function for mmdeploy::TRTBatchedNMS."""
return g.op(
'mmdeploy::TRTBatchedRotatedNMS',
boxes,
scores,
num_classes_i=num_classes,
background_label_id_i=background_label_id,
iou_threshold_f=iou_threshold,
score_threshold_f=score_threshold,
topk_i=pre_topk,
keep_topk_i=after_topk,
is_normalized_i=False,
clip_boxes_i=False,
outputs=2)
The provided code snippet includes necessary dependencies for implementing the `multiclass_nms_rotated__tensorrt` function. Write a Python function `def multiclass_nms_rotated__tensorrt(boxes: Tensor, scores: Tensor, max_output_boxes_per_class: int = 1000, iou_threshold: float = 0.5, score_threshold: float = 0.05, pre_top_k: int = -1, keep_top_k: int = -1)` to solve the following problem:
Wrapper for `multiclass_nms` with TensorRT. Args: ctx (ContextCaller): The context with additional information. boxes (Tensor): The bounding boxes of shape [N, num_boxes, 5]. scores (Tensor): The detection scores of shape [N, num_boxes, num_classes]. max_output_boxes_per_class (int): Maximum number of output boxes per class of nms. Defaults to 1000. iou_threshold (float): IOU threshold of nms. Defaults to 0.5. score_threshold (float): score threshold of nms. Defaults to 0.05. pre_top_k (int): Number of top K boxes to keep before nms. Defaults to -1. keep_top_k (int): Number of top K boxes to keep after nms. Defaults to -1. Returns: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 6] and `labels` of shape [N, num_det].
Here is the function:
def multiclass_nms_rotated__tensorrt(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1):
"""Wrapper for `multiclass_nms` with TensorRT.
Args:
ctx (ContextCaller): The context with additional information.
boxes (Tensor): The bounding boxes of shape [N, num_boxes, 5].
scores (Tensor): The detection scores of shape
[N, num_boxes, num_classes].
max_output_boxes_per_class (int): Maximum number of output
boxes per class of nms. Defaults to 1000.
iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
score_threshold (float): score threshold of nms.
Defaults to 0.05.
pre_top_k (int): Number of top K boxes to keep before nms.
Defaults to -1.
keep_top_k (int): Number of top K boxes to keep after nms.
Defaults to -1.
Returns:
tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 6]
and `labels` of shape [N, num_det].
"""
boxes = boxes if boxes.dim() == 4 else boxes.unsqueeze(2)
keep_top_k = max_output_boxes_per_class if keep_top_k < 0 else min(
max_output_boxes_per_class, keep_top_k)
dets, labels = TRTBatchedRotatedNMSop.apply(boxes, scores,
int(scores.shape[-1]),
pre_top_k, keep_top_k,
iou_threshold, score_threshold,
-1)
return dets, labels | Wrapper for `multiclass_nms` with TensorRT. Args: ctx (ContextCaller): The context with additional information. boxes (Tensor): The bounding boxes of shape [N, num_boxes, 5]. scores (Tensor): The detection scores of shape [N, num_boxes, num_classes]. max_output_boxes_per_class (int): Maximum number of output boxes per class of nms. Defaults to 1000. iou_threshold (float): IOU threshold of nms. Defaults to 0.5. score_threshold (float): score threshold of nms. Defaults to 0.05. pre_top_k (int): Number of top K boxes to keep before nms. Defaults to -1. keep_top_k (int): Number of top K boxes to keep after nms. Defaults to -1. Returns: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 6] and `labels` of shape [N, num_det]. |
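A short sketch under the same caveat (illustrative shapes, not part of the row above): calling the TensorRT wrapper eagerly only exercises the random dummy forward of TRTBatchedRotatedNMSop, so it demonstrates the calling convention and output shapes; the real rotated NMS is performed by the TensorRT plugin named in the symbolic function during engine execution.

import torch

boxes = torch.rand(1, 200, 5)   # [N, num_boxes, 5] rotated boxes (cx, cy, w, h, angle)
scores = torch.rand(1, 200, 4)  # [N, num_boxes, num_classes]
dets, labels = multiclass_nms_rotated__tensorrt(
    boxes, scores, pre_top_k=100, keep_top_k=10)
# dets: [1, 10, 6] (rotated box plus score); labels: [1, 10] (random placeholders outside TensorRT)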