id (int64, 0–190k) | prompt (string, length 21–13.4M) | docstring (string, length 1–12k, ⌀ = null) |
---|---|---|
188,834 | from mmengine import Registry
from mmdeploy.utils.config_utils import Backend
def __build_backend_wrapper_class(backend: Backend, registry: Registry):
return registry.module_dict[backend.value] | null |
188,835 | from mmengine import Registry
from mmdeploy.utils.config_utils import Backend
def get_backend_wrapper_class(backend: Backend) -> type:
"""Get the backend wrapper class from the registry.
Args:
backend (Backend): The backend enum type.
Returns:
type: The backend wrapper class
"""
return BACKEND_WRAPPER.build(backend)
def get_backend_file_count(backend: Backend):
backend_class = get_backend_wrapper_class(backend)
return backend_class.get_backend_file_count() | null |
188,836 | import re
import sys
from typing import List, Optional, Sequence
# PPLNN python bindings (the `pyppl` package provides `nn` and `common`)
from pyppl import common as pplcommon
from pyppl import nn as pplnn
from mmdeploy.utils import get_root_logger
from mmdeploy.utils.device import parse_cuda_device_id
from .utils import create_runtime, register_engines
def parse_cuda_device_id(device: str) -> int:
"""Parse cuda device index from a string.
Args:
device (str): The typical style of string specifying cuda device,
e.g.: 'cuda:0'.
Returns:
int: The parsed device id, defaults to `0`.
"""
match_result = re.match('([^:]+)(:[0-9]+)?$', device)
assert match_result is not None, f'Can not parse device {device}.'
assert match_result.group(1).lower() == 'cuda', 'Not cuda device.'
device_id = 0 if match_result.lastindex == 1 else int(
match_result.group(2)[1:])
return device_id
def create_runtime(onnx_file: str,
engines: List[pplnn.Engine]) -> pplnn.Runtime:
"""Create runtime object for pplnn.
Args:
onnx_file (str): path to onnx model
engines (List[pplnn.Engine]): engines used to create the runtime
object
Returns:
pplnn.Runtime: created runtime object
"""
runtime_builder = pplnn.onnx.RuntimeBuilderFactory.Create()
assert runtime_builder is not None, 'Failed to create '\
'onnx.RuntimeBuilder.'
status = runtime_builder.LoadModelFromFile(onnx_file)
assert status == pplcommon.RC_SUCCESS, 'Failed to load ONNX model.'
resources = pplnn.onnx.RuntimeBuilderResources()
resources.engines = engines
status = runtime_builder.SetResources(resources)
assert status == pplcommon.RC_SUCCESS, 'runtime_builder.SetResources() ' \
'Failed.'
status = runtime_builder.Preprocess()
assert status == pplcommon.RC_SUCCESS, 'runtime_builder.Preprocess() ' \
'Failed.'
runtime = runtime_builder.CreateRuntime()
assert runtime is not None, 'Failed to create onnx.Runtime'
return runtime
def register_engines(device_id: int,
disable_avx512: bool = False,
quick_select: bool = False,
input_shapes: Optional[Sequence[Sequence[int]]] = None,
export_algo_file: Optional[str] = None,
import_algo_file: Optional[str] = None) -> List[pplnn.Engine]:
"""Register engines for pplnn runtime.
Args:
device_id (int): Specifying device index. `-1` for cpu.
disable_avx512 (bool): Whether to disable avx512 for x86.
Defaults to `False`.
quick_select (bool): Whether to use default algorithms.
Defaults to `False`.
input_shapes (Sequence[Sequence[int]]): shapes for PPLNN optimization.
export_algo_file (str): File path for exporting PPLNN optimization
file.
import_algo_file (str): File path for loading PPLNN optimization file.
Returns:
list[pplnn.Engine]: A list of registered pplnn engines.
"""
engines = []
logger = get_root_logger()
if device_id == -1:
x86_options = pplnn.x86.EngineOptions()
x86_engine = pplnn.x86.EngineFactory.Create(x86_options)
if not x86_engine:
logger.error('Failed to create x86 engine')
sys.exit(1)
if disable_avx512:
status = x86_engine.Configure(pplnn.x86.ENGINE_CONF_DISABLE_AVX512)
if status != pplcommon.RC_SUCCESS:
logger.error('x86 engine Configure() failed: ' +
pplcommon.GetRetCodeStr(status))
sys.exit(1)
engines.append(x86_engine)
else:
cuda_options = pplnn.cuda.EngineOptions()
cuda_options.device_id = device_id
cuda_options.mm_policy = pplnn.cuda.MM_BEST_FIT
cuda_engine = pplnn.cuda.EngineFactory.Create(cuda_options)
if not cuda_engine:
logger.error('Failed to create cuda engine.')
sys.exit(1)
if quick_select:
status = cuda_engine.Configure(
pplnn.cuda.ENGINE_CONF_USE_DEFAULT_ALGORITHMS)
if status != pplcommon.RC_SUCCESS:
logger.error('cuda engine Configure() failed: ' +
pplcommon.GetRetCodeStr(status))
sys.exit(1)
if input_shapes is not None:
status = cuda_engine.Configure(
pplnn.cuda.ENGINE_CONF_SET_INPUT_DIMS, input_shapes)
if status != pplcommon.RC_SUCCESS:
logger.error(
'cuda engine Configure(ENGINE_CONF_SET_INPUT_DIMS) '
'failed: ' + pplcommon.GetRetCodeStr(status))
sys.exit(1)
if export_algo_file is not None:
status = cuda_engine.Configure(
pplnn.cuda.ENGINE_CONF_EXPORT_ALGORITHMS, export_algo_file)
if status != pplcommon.RC_SUCCESS:
logger.error(
'cuda engine Configure(ENGINE_CONF_EXPORT_ALGORITHMS) '
'failed: ' + pplcommon.GetRetCodeStr(status))
sys.exit(1)
if import_algo_file is not None:
status = cuda_engine.Configure(
pplnn.cuda.ENGINE_CONF_IMPORT_ALGORITHMS, import_algo_file)
if status != pplcommon.RC_SUCCESS:
logger.error(
'cuda engine Configure(ENGINE_CONF_IMPORT_ALGORITHMS) '
'failed: ' + pplcommon.GetRetCodeStr(status))
sys.exit(1)
engines.append(cuda_engine)
return engines
The provided code snippet includes necessary dependencies for implementing the `from_onnx` function. Write a Python function `def from_onnx(onnx_model: str, output_file_prefix: str, device: str = 'cuda:0', input_shapes: Optional[Sequence[Sequence[int]]] = None, **kwargs)` to solve the following problem:
Convert ONNX to PPLNN. PPLNN is capable of optimizing onnx model. The optimized algorithm is saved into `algo_file` in json format. Note that `input_shapes` actually require multiple shapes of inputs in its original design. But in the pipeline of our codebase, we only pass one input shape which can be modified by users' own preferences. Args: output_file_prefix (str): File path to save PPLNN optimization algorithm and ONNX file onnx_model (str): Input onnx model. device (str): A string specifying device, defaults to 'cuda:0'. input_shapes (Sequence[Sequence[int]] | None): Shapes for PPLNN optimization, default to None. Examples: >>> from mmdeploy.apis.pplnn import from_onnx >>> >>> from_onnx(onnx_model = 'example.onnx', output_file_prefix = 'example')
Here is the function:
def from_onnx(onnx_model: str,
output_file_prefix: str,
device: str = 'cuda:0',
input_shapes: Optional[Sequence[Sequence[int]]] = None,
**kwargs):
"""Convert ONNX to PPLNN.
PPLNN is capable of optimizing the ONNX model. The optimized algorithms are
saved into `algo_file` in JSON format. Note that `input_shapes` was designed
to take multiple input shapes, but in the pipeline of our codebase we only
pass a single input shape, which users may modify to their own
preference.
Args:
output_file_prefix (str): File path to save PPLNN optimization
algorithm and ONNX file
onnx_model (str): Input onnx model.
device (str): A string specifying device, defaults to 'cuda:0'.
input_shapes (Sequence[Sequence[int]] | None): Shapes for PPLNN
optimization, default to None.
Examples:
>>> from mmdeploy.apis.pplnn import from_onnx
>>>
>>> from_onnx(onnx_model = 'example.onnx',
output_file_prefix = 'example')
"""
if device == 'cpu':
device_id = -1
else:
assert 'cuda' in device, (f'unexpected device: {device}, must contain '
'`cpu` or `cuda`')
device_id = parse_cuda_device_id(device)
if input_shapes is None:
input_shapes = [[1, 3, 224,
224]] # PPLNN default shape for optimization
algo_file = output_file_prefix + '.json'
onnx_output_path = output_file_prefix + '.onnx'
engines = register_engines(
device_id,
disable_avx512=False,
quick_select=False,
export_algo_file=algo_file,
input_shapes=input_shapes)
_ = create_runtime(onnx_model, engines) # side effect: export algorithms
import shutil
if onnx_output_path != onnx_model:
shutil.copy2(onnx_model, onnx_output_path) | Convert ONNX to PPLNN. PPLNN is capable of optimizing onnx model. The optimized algorithm is saved into `algo_file` in json format. Note that `input_shapes` actually require multiple shapes of inputs in its original design. But in the pipeline of our codebase, we only pass one input shape which can be modified by users' own preferences. Args: output_file_prefix (str): File path to save PPLNN optimization algorithm and ONNX file onnx_model (str): Input onnx model. device (str): A string specifying device, defaults to 'cuda:0'. input_shapes (Sequence[Sequence[int]] | None): Shapes for PPLNN optimization, default to None. Examples: >>> from mmdeploy.apis.pplnn import from_onnx >>> >>> from_onnx(onnx_model = 'example.onnx', output_file_prefix = 'example') |
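A minimal usage sketch for the `from_onnx` above, assuming `example.onnx` exists and a CUDA device is available; the paths and shape below are illustrative only. The call writes the optimization algorithms to `work_dir/end2end.json` and copies the ONNX file to `work_dir/end2end.onnx`:
from mmdeploy.apis.pplnn import from_onnx

# convert the ONNX model and let PPLNN tune algorithms for one input shape
from_onnx(
    onnx_model='example.onnx',
    output_file_prefix='work_dir/end2end',
    device='cuda:0',
    input_shapes=[[1, 3, 512, 512]])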
188,837 | import os
import os.path as osp
import tempfile
from subprocess import call
from typing import List, Optional, Union
import onnx
from .init_plugins import get_onnx2ncnn_path
def mkdir_or_exist(dir_name, mode=0o777):
if dir_name == '':
return
dir_name = osp.expanduser(dir_name)
os.makedirs(dir_name, mode=mode, exist_ok=True)
The provided code snippet includes necessary dependencies for implementing the `get_output_model_file` function. Write a Python function `def get_output_model_file(onnx_path: str, work_dir: Optional[str] = None) -> List[str]` to solve the following problem:
Returns the path to the .param, .bin file with export result. Args: onnx_path (str): The path of the onnx model. work_dir (str|None): The path of the directory for saving the results. Defaults to `None`, which means using the directory of onnx_path. Returns: List[str]: The path of the files where the export result will be located.
Here is the function:
def get_output_model_file(onnx_path: str,
work_dir: Optional[str] = None) -> List[str]:
"""Returns the path to the .param, .bin file with export result.
Args:
onnx_path (str): The path of the onnx model.
work_dir (str|None): The path of the directory for saving the results.
Defaults to `None`, which means using the directory of onnx_path.
Returns:
List[str]: The path of the files where the export result will be
located.
"""
if work_dir is None:
work_dir = osp.dirname(onnx_path)
mkdir_or_exist(osp.abspath(work_dir))
file_name = osp.splitext(osp.split(onnx_path)[1])[0]
save_param = osp.join(work_dir, file_name + '.param')
save_bin = osp.join(work_dir, file_name + '.bin')
return [save_param, save_bin] | Returns the path to the .param, .bin file with export result. Args: onnx_path (str): The path of the onnx model. work_dir (str|None): The path of the directory for saving the results. Defaults to `None`, which means using the directory of onnx_path. Returns: List[str]: The path of the files where the export result will be located. |
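A short usage sketch for `get_output_model_file`; the paths are hypothetical:
# returns the .param/.bin paths that the ncnn export will write to
param_path, bin_path = get_output_model_file(
    'work_dir/end2end.onnx', work_dir='work_dir/ncnn')
# param_path == 'work_dir/ncnn/end2end.param'
# bin_path == 'work_dir/ncnn/end2end.bin'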
188,838 | import os
import os.path as osp
import shutil
import tempfile
from subprocess import call
from typing import List, Optional, Union
import onnx
from mmdeploy.utils import get_file_path
from .init_plugins import get_onnx2ncnn_path
def get_onnx2ncnn_path() -> str:
"""Get mmdeploy_onnx2ncnn path.
Returns:
str: A path of mmdeploy_onnx2ncnn tool.
"""
candidates = ['./mmdeploy_onnx2ncnn', './mmdeploy_onnx2ncnn.exe']
onnx2ncnn_path = get_file_path(os.path.dirname(__file__), candidates)
if onnx2ncnn_path is None or not os.path.exists(onnx2ncnn_path):
onnx2ncnn_path = get_file_path('', candidates)
if onnx2ncnn_path is None or not os.path.exists(onnx2ncnn_path):
onnx2ncnn_path = shutil.which('mmdeploy_onnx2ncnn')
onnx2ncnn_path = '' if onnx2ncnn_path is None else onnx2ncnn_path
return onnx2ncnn_path
The provided code snippet includes necessary dependencies for implementing the `from_onnx` function. Write a Python function `def from_onnx(onnx_model: Union[onnx.ModelProto, str], output_file_prefix: str)` to solve the following problem:
Convert ONNX to ncnn. The inputs of ncnn include a model file and a weight file. We need to use an executable program to convert the `.onnx` file to a `.param` file and a `.bin` file. The output files will save to work_dir. Example: >>> from mmdeploy.apis.ncnn import from_onnx >>> onnx_path = 'work_dir/end2end.onnx' >>> output_file_prefix = 'work_dir/end2end' >>> from_onnx(onnx_path, output_file_prefix) Args: onnx_path (ModelProto|str): The path of the onnx model. output_file_prefix (str): The path to save the output ncnn file.
Here is the function:
def from_onnx(onnx_model: Union[onnx.ModelProto, str],
output_file_prefix: str):
"""Convert ONNX to ncnn.
The inputs of ncnn include a model file and a weight file. We need to use
an executable program to convert the `.onnx` file to a `.param` file and
a `.bin` file. The output files will save to work_dir.
Example:
>>> from mmdeploy.apis.ncnn import from_onnx
>>> onnx_path = 'work_dir/end2end.onnx'
>>> output_file_prefix = 'work_dir/end2end'
>>> from_onnx(onnx_path, output_file_prefix)
Args:
onnx_path (ModelProto|str): The path of the onnx model.
output_file_prefix (str): The path to save the output ncnn file.
"""
if not isinstance(onnx_model, str):
onnx_path = tempfile.NamedTemporaryFile(suffix='.onnx').name
onnx.save(onnx_model, onnx_path)
else:
onnx_path = onnx_model
save_param = output_file_prefix + '.param'
save_bin = output_file_prefix + '.bin'
onnx2ncnn_path = get_onnx2ncnn_path()
ret_code = call([onnx2ncnn_path, onnx_path, save_param, save_bin])
assert ret_code == 0, 'onnx2ncnn failed' | Convert ONNX to ncnn. The inputs of ncnn include a model file and a weight file. We need to use an executable program to convert the `.onnx` file to a `.param` file and a `.bin` file. The output files will save to work_dir. Example: >>> from mmdeploy.apis.ncnn import from_onnx >>> onnx_path = 'work_dir/end2end.onnx' >>> output_file_prefix = 'work_dir/end2end' >>> from_onnx(onnx_path, output_file_prefix) Args: onnx_path (ModelProto|str): The path of the onnx model. output_file_prefix (str): The path to save the output ncnn file. |
188,839 | import os
import shutil
from mmdeploy.utils import get_file_path
import os
if os.path.exists(ops_path):
cdll.LoadLibrary(ops_path)
The provided code snippet includes necessary dependencies for implementing the `get_ops_path` function. Write a Python function `def get_ops_path() -> str` to solve the following problem:
Get ncnn custom ops library path. Returns: str: The library path of ncnn custom ops.
Here is the function:
def get_ops_path() -> str:
"""Get ncnn custom ops library path.
Returns:
str: The library path of ncnn custom ops.
"""
candidates = [
'../../lib/libmmdeploy_ncnn_ops.so', '../../lib/mmdeploy_ncnn_ops.dll'
]
return get_file_path(os.path.dirname(__file__), candidates) | Get ncnn custom ops library path. Returns: str: The library path of ncnn custom ops. |
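The stray `cdll.LoadLibrary` fragment in this row's imports hints at how the returned path is consumed; a sketch of that pattern (the path is an empty string when the ops library was not built):
import os
from ctypes import cdll

ops_path = get_ops_path()
if ops_path and os.path.exists(ops_path):
    # make the mmdeploy custom ops visible to the ncnn runtime
    cdll.LoadLibrary(ops_path)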
188,840 | import os.path as osp
from subprocess import call
from typing import List
import mmengine
from .init_plugins import get_ncnn2int8_path
The provided code snippet includes necessary dependencies for implementing the `get_quant_model_file` function. Write a Python function `def get_quant_model_file(onnx_path: str, work_dir: str) -> List[str]` to solve the following problem:
Returns the path to quant onnx and table with export result. Args: onnx_path (str): The path to the fp32 onnx model. work_dir (str): The path to the directory for saving the results. Returns: List[str]: The path to the files where the export result will be located.
Here is the function:
def get_quant_model_file(onnx_path: str, work_dir: str) -> List[str]:
"""Returns the path to quant onnx and table with export result.
Args:
onnx_path (str): The path to the fp32 onnx model.
work_dir (str): The path to the directory for saving the results.
Returns:
List[str]: The path to the files where the export result will be
located.
"""
mmengine.mkdir_or_exist(osp.abspath(work_dir))
base_name = osp.splitext(osp.split(onnx_path)[1])[0]
quant_onnx = osp.join(work_dir, base_name + '_quant.onnx')
quant_table = osp.join(work_dir, base_name + '.table')
quant_param = osp.join(work_dir, base_name + '_int8.param')
quant_bin = osp.join(work_dir, base_name + '_int8.bin')
return [quant_onnx, quant_table, quant_param, quant_bin] | Returns the path to quant onnx and table with export result. Args: onnx_path (str): The path to the fp32 onnx model. work_dir (str): The path to the directory for saving the results. Returns: List[str]: The path to the files where the export result will be located. |
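Usage sketch for `get_quant_model_file`, with hypothetical paths:
quant_onnx, quant_table, quant_param, quant_bin = get_quant_model_file(
    'work_dir/end2end.onnx', work_dir='work_dir/int8')
# -> work_dir/int8/end2end_quant.onnx, end2end.table,
#    end2end_int8.param and end2end_int8.bin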
188,841 | import os.path as osp
import shutil
from subprocess import call
from typing import List
import mmengine
from .init_plugins import get_ncnn2int8_path
def get_ncnn2int8_path() -> str:
"""Get onnx2int8 path.
Returns:
str: A path of ncnn2int8 tool.
"""
ncnn2int8_path = shutil.which('ncnn2int8')
if ncnn2int8_path is None:
raise Exception(
'Cannot find ncnn2int8, try `export PATH=/path/to/ncnn2int8`')
return ncnn2int8_path
The provided code snippet includes necessary dependencies for implementing the `ncnn2int8` function. Write a Python function `def ncnn2int8(param: str, bin: str, table: str, int8_param: str, int8_bin: str)` to solve the following problem:
Convert ncnn float model to quantized model. The inputs of ncnn include float model and weight file. We need to use a executable program to convert the float model to int8 model with calibration table. Example: >>> from mmdeploy.backend.ncnn.quant import ncnn2int8 >>> param = 'work_dir/end2end.param' >>> bin = 'work_dir/end2end.bin' >>> table = 'work_dir/end2end.table' >>> int8_param = 'work_dir/end2end_int8.param' >>> int8_bin = 'work_dir/end2end_int8.bin' >>> ncnn2int8(param, bin, table, int8_param, int8_bin) Args: param (str): The path of ncnn float model graph. bin (str): The path of ncnn float weight model weight. table (str): The path of ncnn calibration table. int8_param (str): The path of ncnn low bit model graph. int8_bin (str): The path of ncnn low bit weight model weight.
Here is the function:
def ncnn2int8(param: str, bin: str, table: str, int8_param: str,
int8_bin: str):
"""Convert ncnn float model to quantized model.
The inputs of ncnn include a float model and a weight file. We need to use
an executable program to convert the float model to an int8 model with a
calibration table.
Example:
>>> from mmdeploy.backend.ncnn.quant import ncnn2int8
>>> param = 'work_dir/end2end.param'
>>> bin = 'work_dir/end2end.bin'
>>> table = 'work_dir/end2end.table'
>>> int8_param = 'work_dir/end2end_int8.param'
>>> int8_bin = 'work_dir/end2end_int8.bin'
>>> ncnn2int8(param, bin, table, int8_param, int8_bin)
Args:
param (str): The path of ncnn float model graph.
bin (str): The path of ncnn float weight model weight.
table (str): The path of ncnn calibration table.
int8_param (str): The path of ncnn low bit model graph.
int8_bin (str): The path of ncnn low bit weight model weight.
"""
ncnn2int8 = get_ncnn2int8_path()
call([ncnn2int8, param, bin, int8_param, int8_bin, table]) | Convert ncnn float model to quantized model. The inputs of ncnn include float model and weight file. We need to use a executable program to convert the float model to int8 model with calibration table. Example: >>> from mmdeploy.backend.ncnn.quant import ncnn2int8 >>> param = 'work_dir/end2end.param' >>> bin = 'work_dir/end2end.bin' >>> table = 'work_dir/end2end.table' >>> int8_param = 'work_dir/end2end_int8.param' >>> int8_bin = 'work_dir/end2end_int8.bin' >>> ncnn2int8(param, bin, table, int8_param, int8_bin) Args: param (str): The path of ncnn float model graph. bin (str): The path of ncnn float weight model weight. table (str): The path of ncnn calibration table. int8_param (str): The path of ncnn low bit model graph. int8_bin (str): The path of ncnn low bit weight model weight. |
188,842 | import glob
import logging
import multiprocessing as mp
import os
import sys
import traceback
from typing import Callable, Optional, Union
from mmdeploy.utils.logging import get_logger
The provided code snippet includes necessary dependencies for implementing the `target_wrapper` function. Write a Python function `def target_wrapper(target: Callable, log_level: int, ret_value: Optional[mp.Value] = None, *args, **kwargs)` to solve the following problem:
The wrapper used to start a new subprocess. Args: target (Callable): The target function to be wrapped. log_level (int): Log level for logging. ret_value (mp.Value): The success flag of target. Return: Any: The return of target.
Here is the function:
def target_wrapper(target: Callable,
log_level: int,
ret_value: Optional[mp.Value] = None,
*args,
**kwargs):
"""The wrapper used to start a new subprocess.
Args:
target (Callable): The target function to be wrapped.
log_level (int): Log level for logging.
ret_value (mp.Value): The success flag of target.
Return:
Any: The return of target.
"""
logger = logging.getLogger()
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S')
logger.setLevel(log_level)
if ret_value is not None:
ret_value.value = -1
try:
result = target(*args, **kwargs)
if ret_value is not None:
ret_value.value = 0
return result
except Exception as e:
logging.error(e)
traceback.print_exc(file=sys.stdout) | The wrapper used to start a new subprocess. Args: target (Callable): The target function to be wrapped. log_level (int): Log level for logging. ret_value (mp.Value): The success flag of target. Return: Any: The return of target. |
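A sketch of how `target_wrapper` is typically launched in a subprocess; the `convert` target below is hypothetical, and `ret_value` ends up 0 on success and -1 on failure:
import logging
import multiprocessing as mp

def convert(x):
    # hypothetical target function for illustration
    return x * 2

ret_value = mp.Value('d', 0, lock=False)
proc = mp.Process(
    target=target_wrapper, args=(convert, logging.INFO, ret_value, 21))
proc.start()
proc.join()
assert ret_value.value == 0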
188,843 | import glob
import logging
import os
import sys
import traceback
from typing import Callable, Optional, Union
from mmdeploy.utils.logging import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO) -> logging.Logger:
"""Get root logger.
Args:
log_file (str, optional): File path of log. Defaults to None.
log_level (int, optional): The level of logger.
Defaults to logging.INFO.
Returns:
logging.Logger: The obtained logger
"""
logger = get_logger(
name='mmdeploy', log_file=log_file, log_level=log_level)
return logger
The provided code snippet includes necessary dependencies for implementing the `deprecate` function. Write a Python function `def deprecate(status: str = 'future', dst_obj: Optional[Union[object, str]] = None, msg: str = '', *args, **kwargs) -> None` to solve the following problem:
Deprecate a function or a class. Args: status (str, optional): The status of the function or class. Defaults to future. dst_obj (str, object, optional): The object that will replace the original one. Defaults to None. msg (str): Additional message to be printed. Examples: >>> from math import ceil >>> from mmdeploy.utils.utils import deprecate >>> @deprecate(status='past', dst_obj=ceil, msg='') >>> def my_ceil(num): >>> num = num if(num==int(num)) else int(num) + 1 >>> return num
Here is the function:
def deprecate(status: str = 'future',
dst_obj: Optional[Union[object, str]] = None,
msg: str = '',
*args,
**kwargs) -> None:
"""Deprecate a function or a class.
Args:
status (str, optional): The status of the function or class.
Defaults to future.
dst_obj (str, object, optional): The object that will replace
the original one. Defaults to None.
msg (str): Additional message to be printed.
Examples:
>>> from math import ceil
>>> from mmdeploy.utils.utils import deprecate
>>> @deprecate(status='past', dst_obj=ceil, msg='')
>>> def my_ceil(num):
>>> num = num if(num==int(num)) else int(num) + 1
>>> return num
"""
logger = get_root_logger()
def _register(src_obj):
def fun(*args, **kwargs):
if status == 'future':
logger.warning(
f'DeprecationWarning: {src_obj.__name__} will be '
f'deprecated in the future. {msg}')
elif status == 'past':
assert dst_obj is not None, ('for deprecated object, there'
' must be a destination object')
logger.warning(
f'DeprecationWarning: {src_obj.__name__} was deprecated,'
f' use {dst_obj.__name__} instead. {msg}')
else:
raise KeyError(f'Unexpected key {status}')
result = src_obj(*args, **kwargs)
return result
return fun
return _register | Deprecate a function or a class. Args: status (str, optional): The status of the function or class. Defaults to future. dst_obj (str, object, optional): The object that will replace the original one. Defaults to None. msg (str): Additional message to be printed. Examples: >>> from math import ceil >>> from mmdeploy.utils.utils import deprecate >>> @deprecate(status='past', dst_obj=ceil, msg='') >>> def my_ceil(num): >>> num = num if(num==int(num)) else int(num) + 1 >>> return num |
188,844 | import glob
import logging
import os
import sys
import traceback
from typing import Callable, Optional, Union
from mmdeploy.utils.logging import get_logger
The provided code snippet includes necessary dependencies for implementing the `get_file_path` function. Write a Python function `def get_file_path(prefix, candidates) -> str` to solve the following problem:
Search for file in candidates. Args: prefix (str): Prefix of the paths. candidates (str): Candidate paths Returns: str: file path or '' if not found
Here is the function:
def get_file_path(prefix, candidates) -> str:
"""Search for file in candidates.
Args:
prefix (str): Prefix of the paths.
candidates (str): Candidate paths
Returns:
str: file path or '' if not found
"""
for candidate in candidates:
wildcard = os.path.abspath(os.path.join(prefix, candidate))
paths = glob.glob(wildcard)
if paths:
lib_path = paths[0]
return lib_path
return '' | Search for file in candidates. Args: prefix (str): Prefix of the paths. candidates (str): Candidate paths Returns: str: file path or '' if not found |
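A short usage sketch for `get_file_path`, reusing the ncnn candidate paths seen earlier in this dump:
import os

lib_path = get_file_path(
    os.path.dirname(__file__),
    ['../../lib/libmmdeploy_ncnn_ops.so', '../../lib/mmdeploy_ncnn_ops.dll'])
if not lib_path:
    print('custom ops library not found')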
188,845 | from typing import Dict, List, Optional, Union
import mmengine
from .constants import Backend, Codebase, Task
from .utils import deprecate, get_root_logger
def get_ir_config(deploy_cfg: Union[str, mmengine.Config]) -> Dict:
"""Get the IR parameters in export() from config.
Args:
deploy_cfg (str | mmengine.Config): The path or content of config.
Returns:
Dict: The config dictionary of IR parameters
"""
deploy_cfg = load_config(deploy_cfg)[0]
ir_config = deploy_cfg.get('ir_config', None)
if ir_config is None:
# TODO: deprecate in future
ir_config = deploy_cfg.get('onnx_config', {})
return ir_config
The provided code snippet includes necessary dependencies for implementing the `get_input_shape` function. Write a Python function `def get_input_shape(deploy_cfg: Union[str, mmengine.Config]) -> List[int]` to solve the following problem:
Get the input shape for static exporting. Args: deploy_cfg (str | mmengine.Config): The path or content of config. Returns: List[int]: The input shape for backend model (axis 2 and 3), e.g [512, 512].
Here is the function:
def get_input_shape(deploy_cfg: Union[str, mmengine.Config]) -> List[int]:
"""Get the input shape for static exporting.
Args:
deploy_cfg (str | mmengine.Config): The path or content of config.
Returns:
List[int]: The input shape for backend model (axis 2 and 3),
e.g [512, 512].
"""
input_shape = get_ir_config(deploy_cfg).get('input_shape', None)
if input_shape is not None:
assert len(input_shape) == 2, 'length of input_shape should equal to 2'
return input_shape | Get the input shape for static exporting. Args: deploy_cfg (str | mmengine.Config): The path or content of config. Returns: List[int]: The input shape for backend model (axis 2 and 3), e.g [512, 512]. |
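A minimal sketch of the config layout `get_input_shape` expects, assuming `load_config` accepts an in-memory `mmengine.Config`:
import mmengine

deploy_cfg = mmengine.Config(dict(onnx_config=dict(input_shape=[512, 512])))
assert get_input_shape(deploy_cfg) == [512, 512]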
188,846 | from typing import Dict, List, Optional, Union
import mmengine
from .constants import Backend, Codebase, Task
from .utils import deprecate, get_root_logger
The provided code snippet includes necessary dependencies for implementing the `cfg_apply_marks` function. Write a Python function `def cfg_apply_marks(deploy_cfg: Union[str, mmengine.Config]) -> Optional[bool]` to solve the following problem:
Check if the model needs to be partitioned by checking if the config contains 'apply_marks'. Args: deploy_cfg (str | mmengine.Config): The path or content of config. Returns: bool or None: Whether config contains 'apply_marks'.
Here is the function:
def cfg_apply_marks(deploy_cfg: Union[str, mmengine.Config]) -> Optional[bool]:
"""Check if the model needs to be partitioned by checking if the config
contains 'apply_marks'.
Args:
deploy_cfg (str | mmengine.Config): The path or content of config.
Returns:
bool or None: Whether config contains 'apply_marks'.
"""
partition_config = deploy_cfg.get('partition_config', None)
if partition_config is None:
return None
apply_marks = partition_config.get('apply_marks', False)
return apply_marks | Check if the model needs to be partitioned by checking if the config contains 'apply_marks'. Args: deploy_cfg (str | mmengine.Config): The path or content of config. Returns: bool or None: Whether config contains 'apply_marks'. |
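A small illustration of the config field `cfg_apply_marks` inspects:
import mmengine

cfg = mmengine.Config(dict(partition_config=dict(apply_marks=True)))
assert cfg_apply_marks(cfg) is True
assert cfg_apply_marks(mmengine.Config(dict())) is None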
188,847 | import importlib
from mmdeploy.utils import Codebase
def get_library_version(lib):
"""Try to get the version of a library if it has been installed.
Args:
lib (str): The name of library.
Returns:
None | str: If the library has been installed, return version.
"""
try:
lib = importlib.import_module(lib)
if hasattr(lib, '__version__'):
version = lib.__version__
else:
version = None
except Exception:
version = None
return version
The provided code snippet includes necessary dependencies for implementing the `get_codebase_version` function. Write a Python function `def get_codebase_version()` to solve the following problem:
Get the version dictionary of all supported codebases. Returns: Dict: The name and the version of supported codebases.
Here is the function:
def get_codebase_version():
"""Get the version dictionary of all supported codebases.
Returns:
Dict: The name and the version of supported codebases.
"""
version_dict = dict()
for enum in Codebase:
codebase = enum.value
version_dict[codebase] = get_library_version(codebase)
return version_dict | Get the version dictionary of all supported codebases. Returns: Dict: The name and the version of supported codebases. |
188,848 | import importlib
from mmdeploy.utils import Codebase
def get_library_version(lib):
"""Try to get the version of a library if it has been installed.
Args:
lib (str): The name of library.
Returns:
None | str: If the library has been installed, return version.
"""
try:
lib = importlib.import_module(lib)
if hasattr(lib, '__version__'):
version = lib.__version__
else:
version = None
except Exception:
version = None
return version
The provided code snippet includes necessary dependencies for implementing the `get_backend_version` function. Write a Python function `def get_backend_version()` to solve the following problem:
Get the version dictionary of some supported backend. Returns: Dict: The name and the version of some supported backend.
Here is the function:
def get_backend_version():
"""Get the version dictionary of some supported backend.
Returns:
Dict: The name and the version of some supported backend.
"""
backend_library_list = ['tensorrt', 'onnxruntime', 'ncnn', 'tvm']
version_dict = dict()
for backend in backend_library_list:
version_dict[backend] = get_library_version(backend)
return version_dict | Get the version dictionary of some supported backend. Returns: Dict: The name and the version of some supported backend. |
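The two version helpers are typically used together when reporting the environment; libraries that are not installed map to None:
print('codebase versions:', get_codebase_version())
print('backend versions:', get_backend_version())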
188,849 | import re
from typing import Optional
def parse_cuda_device_id(device: str) -> int:
"""Parse cuda device index from a string.
Args:
device (str): The typical style of string specifying cuda device,
e.g.: 'cuda:0'.
Returns:
int: The parsed device id, defaults to `0`.
"""
match_result = re.match('([^:]+)(:[0-9]+)?$', device)
assert match_result is not None, f'Can not parse device {device}.'
assert match_result.group(1).lower() == 'cuda', 'Not cuda device.'
device_id = 0 if match_result.lastindex == 1 else int(
match_result.group(2)[1:])
return device_id
The provided code snippet includes necessary dependencies for implementing the `parse_device_id` function. Write a Python function `def parse_device_id(device: str) -> Optional[int]` to solve the following problem:
Parse device index from a string. Args: device (str): The typical style of string specifying device, e.g.: 'cuda:0', 'cpu'. Returns: Optional[int]: The return value depends on the type of device. If device is 'cuda': cuda device index, defaults to `0`. If device is 'cpu': `-1`. Otherwise, `None` will be returned.
Here is the function:
def parse_device_id(device: str) -> Optional[int]:
"""Parse device index from a string.
Args:
device (str): The typical style of string specifying device,
e.g.: 'cuda:0', 'cpu'.
Returns:
Optional[int]: The return value depends on the type of device.
If device is 'cuda': cuda device index, defaults to `0`.
If device is 'cpu': `-1`.
Otherwise, `None` will be returned.
"""
if device == 'cpu':
return -1
if 'cuda' in device:
return parse_cuda_device_id(device)
return None | Parse device index from a string. Args: device (str): The typical style of string specifying device, e.g.: 'cuda:0', 'cpu'. Returns: Optional[int]: The return value depends on the type of device. If device is 'cuda': cuda device index, defaults to `0`. If device is 'cpu': `-1`. Otherwise, `None` will be returned. |
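Quick illustration of the return conventions of `parse_device_id`:
assert parse_device_id('cuda:1') == 1
assert parse_device_id('cuda') == 0
assert parse_device_id('cpu') == -1
assert parse_device_id('npu') is None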
188,850 | import re
from typing import Optional
The provided code snippet includes necessary dependencies for implementing the `parse_device_type` function. Write a Python function `def parse_device_type(device: str) -> str` to solve the following problem:
Parse device type from a string. Args: device (str): The typical style of string specifying cuda device, e.g.: 'cuda:0', 'cpu', 'npu'. Returns: str: The parsed device type such as 'cuda', 'cpu', 'npu'.
Here is the function:
def parse_device_type(device: str) -> str:
"""Parse device type from a string.
Args:
device (str): The typical style of string specifying cuda device,
e.g.: 'cuda:0', 'cpu', 'npu'.
Returns:
str: The parsed device type such as 'cuda', 'cpu', 'npu'.
"""
device_type = device
if ':' in device:
device_type = device.split(':')[0]
return device_type | Parse device type from a string. Args: device (str): The typical style of string specifying cuda device, e.g.: 'cuda:0', 'cpu', 'npu'. Returns: str: The parsed device type such as 'cuda', 'cpu', 'npu'. |
188,851 | from torch.utils.data import Dataset
The provided code snippet includes necessary dependencies for implementing the `is_can_sort_dataset` function. Write a Python function `def is_can_sort_dataset(dataset: Dataset) -> bool` to solve the following problem:
Checking for the possibility of sorting the dataset by fields 'height' and 'width'. Args: dataset (Dataset): The dataset. Returns: bool: Is it possible or not to sort the dataset.
Here is the function:
def is_can_sort_dataset(dataset: Dataset) -> bool:
"""Checking for the possibility of sorting the dataset by fields 'height'
and 'width'.
Args:
dataset (Dataset): The dataset.
Returns:
bool: Is it possible or not to sort the dataset.
"""
is_sort_possible = \
hasattr(dataset, 'data_infos') and \
dataset.data_infos and \
all(key in dataset.data_infos[0] for key in ('height', 'width'))
return is_sort_possible | Checking for the possibility of sorting the dataset by fields 'height' and 'width'. Args: dataset (Dataset): The dataset. Returns: bool: Is it possible or not to sort the dataset. |
188,852 | from torch.utils.data import Dataset
The provided code snippet includes necessary dependencies for implementing the `sort_dataset` function. Write a Python function `def sort_dataset(dataset: Dataset) -> Dataset` to solve the following problem:
Sorts the dataset by image height and width. Args: dataset (Dataset): The dataset. Returns: Dataset: Sorted dataset.
Here is the function:
def sort_dataset(dataset: Dataset) -> Dataset:
"""Sorts the dataset by image height and width.
Args:
dataset (Dataset): The dataset.
Returns:
Dataset: Sorted dataset.
"""
sort_data_infos = sorted(
dataset.data_infos, key=lambda e: (e['height'], e['width']))
sort_img_ids = [e['id'] for e in sort_data_infos]
dataset.data_infos = sort_data_infos
dataset.img_ids = sort_img_ids
return dataset | Sorts the dataset by image height and width. Args: dataset (Dataset): The dataset. Returns: Dataset: Sorted dataset. |
188,853 | import asyncio
import os
import shutil
import torch
from pyppeteer import launch
from torchvision.models import resnet18
from mmdeploy.core import FUNCTION_REWRITER, RewriterContext, patch_model
from mmdeploy.utils import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `forward_of_resnet` function. Write a Python function `def forward_of_resnet(self, x)` to solve the following problem:
Rewrite the forward implementation of resnet. Early return the feature map after two down-sampling steps.
Here is the function:
def forward_of_resnet(self, x):
"""Rewrite the forward implementation of resnet.
Early return the feature map after two down-sampling steps.
"""
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
return x | Rewrite the forward implementation of resnet. Early return the feature map after two down-sampling steps. |
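In mmdeploy, such a rewrite only takes effect after it is registered with the rewriter; a sketch under the assumption that the intended target is torchvision's `ResNet._forward_impl` (the exact `func_name` used by this demo is not shown in the row):
# register the early-return forward as a rewrite of the torchvision method
forward_of_resnet = FUNCTION_REWRITER.register_rewriter(
    func_name='torchvision.models.ResNet._forward_impl')(forward_of_resnet)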
188,854 | import asyncio
import os
import shutil
import torch
from pyppeteer import launch
from torchvision.models import resnet18
from mmdeploy.core import FUNCTION_REWRITER, RewriterContext, patch_model
from mmdeploy.utils import get_root_logger
def rewrite_resnet18(original_path: str, rewritten_path: str):
# prepare inputs and original model
inputs = torch.rand(1, 3, 224, 224)
original_model = resnet18(pretrained=False)
# export original model
torch.onnx.export(original_model, inputs, original_path)
# patch model
patched_model = patch_model(original_model, cfg={}, backend='default')
# export rewritten onnx under a rewriter context manager
with RewriterContext(cfg={}, backend='default'), torch.no_grad():
torch.onnx.export(patched_model, inputs, rewritten_path) | null |
188,855 | import asyncio
import os
import shutil
import torch
from pyppeteer import launch
from torchvision.models import resnet18
from mmdeploy.core import FUNCTION_REWRITER, RewriterContext, patch_model
from mmdeploy.utils import get_root_logger
def screen_size():
async def visualize(original_path: str, rewritten_path: str):
# launch a web browser
browser = await launch(headless=False, args=['--start-maximized'])
# create two new pages
page2 = await browser.newPage()
page1 = await browser.newPage()
# go to netron.app
width, height = screen_size()
await page1.setViewport({'width': width, 'height': height})
await page2.setViewport({'width': width, 'height': height})
await page1.goto('https://netron.app/')
await page2.goto('https://netron.app/')
await asyncio.sleep(2)
# open local two onnx files
mupinput1 = await page1.querySelector("input[type='file']")
mupinput2 = await page2.querySelector("input[type='file']")
await mupinput1.uploadFile(original_path)
await mupinput2.uploadFile(rewritten_path)
await asyncio.sleep(4)
for _ in range(6):
await page1.click('#zoom-out-button')
await asyncio.sleep(0.3)
await asyncio.sleep(1)
await page1.screenshot({'path': original_path.replace('.onnx', '.png')},
clip={
'x': width / 4,
'y': 0,
'width': width / 2,
'height': height
})
await page2.screenshot({'path': rewritten_path.replace('.onnx', '.png')},
clip={
'x': width / 4,
'y': 0,
'width': width / 2,
'height': height
})
await browser.close() | null |
188,856 | import argparse
import math
import cv2
from mmdeploy_runtime import Detector
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path',
help='path of mmdeploy SDK model dumped by model converter')
parser.add_argument('image_path', help='path of an image')
args = parser.parse_args()
return args | null |
188,857 | import argparse
from math import cos, sin
import cv2
import numpy as np
from mmdeploy_runtime import RotatedDetector
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path', help='path of SDK model dumped by model converter')
parser.add_argument('image_path', help='path of an image')
args = parser.parse_args()
return args | null |
188,858 | import argparse
import cv2
from mmdeploy_runtime import TextDetector, TextRecognizer
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument('image_path', help='path of an image')
parser.add_argument(
'--textdet',
default='',
help='path of mmdeploy text-detector SDK model dumped by'
'model converter',
)
parser.add_argument(
'--textrecog',
default='',
help='path of mmdeploy text-recognizer SDK model dumped by'
'model converter',
)
args = parser.parse_args()
return args | null |
188,859 | import argparse
import cv2
from mmdeploy_runtime import VideoRecognizer
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path',
help='path of mmdeploy SDK model dumped by model converter')
parser.add_argument('video_path', help='path of a video')
parser.add_argument(
'--clip_len', help='Frames of each sampled output clip', default=1)
parser.add_argument(
'--frame_interval',
help='Temporal interval of adjacent sampled frames.',
default=1)
parser.add_argument(
'--num_clips', help='Number of clips to be sampled', default=25)
args = parser.parse_args()
return args | null |
188,860 | import argparse
import cv2
from mmdeploy_runtime import VideoRecognizer
def SampleFrames(cap, clip_len, frame_interval, num_clips):
if not cap.isOpened():
print('failed to load video')
exit(-1)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
ori_clip_len = clip_len * frame_interval
avg_interval = (num_frames - ori_clip_len + 1) / float(num_clips)
frame_inds = []
for i in range(num_clips):
clip_offset = int(i * avg_interval + avg_interval / 2.0)
for j in range(clip_len):
ind = (j * frame_interval + clip_offset) % num_frames
if num_frames <= ori_clip_len - 1:
ind = j % num_frames
frame_inds.append(ind)
unique_inds = sorted(list(set(frame_inds)))
buffer = {}
ind = 0
for i, tid in enumerate(unique_inds):
while ind < tid:
_, mat = cap.read()
ind += 1
_, mat = cap.read()
buffer[tid] = mat
ind += 1
clips = []
for tid in frame_inds:
clips.append(buffer[tid])
info = (clip_len, num_clips)
return clips, info | null |
188,861 | import argparse
import cv2
import numpy as np
from mmdeploy_runtime import Segmentor
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path',
help='path of mmdeploy SDK model dumped by model converter')
parser.add_argument('image_path', help='path of an image')
args = parser.parse_args()
return args | null |
188,862 | import argparse
import cv2
import numpy as np
from mmdeploy_runtime import Segmentor
def get_palette(num_classes=256):
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
return [tuple(c) for c in palette] | null |
188,863 | import argparse
import os
import cv2
import numpy as np
from mmdeploy_runtime import PoseTracker
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use SDK Python API')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'det_model',
help='path of mmdeploy SDK model dumped by model converter')
parser.add_argument(
'pose_model',
help='path of mmdeploy SDK model dumped by model converter')
parser.add_argument('video', help='video path or camera index')
parser.add_argument('--output_dir', help='output directory', default=None)
parser.add_argument(
'--skeleton',
default='coco',
choices=['coco', 'coco_wholebody'],
help='skeleton for keypoints')
args = parser.parse_args()
if args.video.isnumeric():
args.video = int(args.video)
return args | null |
188,864 | import argparse
import os
import cv2
import numpy as np
from mmdeploy_runtime import PoseTracker
VISUALIZATION_CFG = dict(
coco=dict(
skeleton=[(15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11),
(6, 12), (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (1, 2),
(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6)],
palette=[(255, 128, 0), (255, 153, 51), (255, 178, 102), (230, 230, 0),
(255, 153, 255), (153, 204, 255), (255, 102, 255),
(255, 51, 255), (102, 178, 255), (51, 153, 255),
(255, 153, 153), (255, 102, 102), (255, 51, 51),
(153, 255, 153), (102, 255, 102), (51, 255, 51), (0, 255, 0),
(0, 0, 255), (255, 0, 0), (255, 255, 255)],
link_color=[
0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16
],
point_color=[16, 16, 16, 16, 16, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0],
sigmas=[
0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072,
0.062, 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089
]),
coco_wholebody=dict(
skeleton=[(15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11),
(6, 12), (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (1, 2),
(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (15, 17),
(15, 18), (15, 19), (16, 20), (16, 21), (16, 22), (91, 92),
(92, 93), (93, 94), (94, 95), (91, 96), (96, 97), (97, 98),
(98, 99), (91, 100), (100, 101), (101, 102), (102, 103),
(91, 104), (104, 105), (105, 106), (106, 107), (91, 108),
(108, 109), (109, 110), (110, 111), (112, 113), (113, 114),
(114, 115), (115, 116), (112, 117), (117, 118), (118, 119),
(119, 120), (112, 121), (121, 122), (122, 123), (123, 124),
(112, 125), (125, 126), (126, 127), (127, 128), (112, 129),
(129, 130), (130, 131), (131, 132)],
palette=[(51, 153, 255), (0, 255, 0), (255, 128, 0), (255, 255, 255),
(255, 153, 255), (102, 178, 255), (255, 51, 51)],
link_color=[
1, 1, 2, 2, 0, 0, 0, 0, 1, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1,
1, 2, 2, 2, 2, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1
],
point_color=[
0, 0, 0, 0, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 2, 2, 2, 2, 2,
2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 2, 2, 2, 2, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1,
1, 1, 3, 2, 2, 2, 2, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1
],
sigmas=[
0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072,
0.062, 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089, 0.068,
0.066, 0.066, 0.092, 0.094, 0.094, 0.042, 0.043, 0.044, 0.043,
0.040, 0.035, 0.031, 0.025, 0.020, 0.023, 0.029, 0.032, 0.037,
0.038, 0.043, 0.041, 0.045, 0.013, 0.012, 0.011, 0.011, 0.012,
0.012, 0.011, 0.011, 0.013, 0.015, 0.009, 0.007, 0.007, 0.007,
0.012, 0.009, 0.008, 0.016, 0.010, 0.017, 0.011, 0.009, 0.011,
0.009, 0.007, 0.013, 0.008, 0.011, 0.012, 0.010, 0.034, 0.008,
0.008, 0.009, 0.008, 0.008, 0.007, 0.010, 0.008, 0.009, 0.009,
0.009, 0.007, 0.007, 0.008, 0.011, 0.008, 0.008, 0.008, 0.01,
0.008, 0.029, 0.022, 0.035, 0.037, 0.047, 0.026, 0.025, 0.024,
0.035, 0.018, 0.024, 0.022, 0.026, 0.017, 0.021, 0.021, 0.032,
0.02, 0.019, 0.022, 0.031, 0.029, 0.022, 0.035, 0.037, 0.047,
0.026, 0.025, 0.024, 0.035, 0.018, 0.024, 0.022, 0.026, 0.017,
0.021, 0.021, 0.032, 0.02, 0.019, 0.022, 0.031
]))
def visualize(frame,
results,
output_dir,
frame_id,
thr=0.5,
resize=1280,
skeleton_type='coco'):
skeleton = VISUALIZATION_CFG[skeleton_type]['skeleton']
palette = VISUALIZATION_CFG[skeleton_type]['palette']
link_color = VISUALIZATION_CFG[skeleton_type]['link_color']
point_color = VISUALIZATION_CFG[skeleton_type]['point_color']
scale = resize / max(frame.shape[0], frame.shape[1])
keypoints, bboxes, _ = results
scores = keypoints[..., 2]
keypoints = (keypoints[..., :2] * scale).astype(int)
bboxes *= scale
img = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
for kpts, score, bbox in zip(keypoints, scores, bboxes):
show = [1] * len(kpts)
for (u, v), color in zip(skeleton, link_color):
if score[u] > thr and score[v] > thr:
cv2.line(img, kpts[u], tuple(kpts[v]), palette[color], 1,
cv2.LINE_AA)
else:
show[u] = show[v] = 0
for kpt, show, color in zip(kpts, show, point_color):
if show:
cv2.circle(img, kpt, 1, palette[color], 2, cv2.LINE_AA)
if output_dir:
cv2.imwrite(f'{output_dir}/{str(frame_id).zfill(6)}.jpg', img)
else:
cv2.imshow('pose_tracker', img)
return cv2.waitKey(1) != ord('q')
return True | null |
188,865 | import argparse
import cv2
import numpy as np
from mmdeploy_runtime import PoseDetector
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path',
help='path of mmdeploy SDK model dumped by model converter')
parser.add_argument('image_path', help='path of an image')
parser.add_argument(
'--bbox',
default=None,
nargs='+',
type=int,
help='bounding box of an object in format (x, y, w, h)')
args = parser.parse_args()
return args | null |
188,866 | import argparse
import cv2
from mmdeploy_runtime import Restorer
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path', help='path of SDK model dumped by model converter')
parser.add_argument('image_path', help='path of an image')
args = parser.parse_args()
return args | null |
188,867 | import argparse
import cv2
import numpy as np
from mmdeploy_runtime import Detector, PoseDetector
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use SDK Python API')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'det_model_path',
help='path of mmdeploy SDK model dumped by model converter')
parser.add_argument(
'pose_model_path',
help='path of mmdeploy SDK model dumped by model converter')
parser.add_argument('image_path', help='path of input image')
args = parser.parse_args()
return args | null |
188,868 | import argparse
import cv2
import numpy as np
from mmdeploy_runtime import Detector, PoseDetector
def visualize(frame, keypoints, filename, thr=0.5, resize=1280):
skeleton = [(15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11),
(6, 12), (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (1, 2),
(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6)]
palette = [(255, 128, 0), (255, 153, 51), (255, 178, 102), (230, 230, 0),
(255, 153, 255), (153, 204, 255), (255, 102, 255),
(255, 51, 255), (102, 178, 255),
(51, 153, 255), (255, 153, 153), (255, 102, 102), (255, 51, 51),
(153, 255, 153), (102, 255, 102), (51, 255, 51), (0, 255, 0),
(0, 0, 255), (255, 0, 0), (255, 255, 255)]
link_color = [
0, 0, 0, 0, 7, 7, 7, 9, 9, 9, 9, 9, 16, 16, 16, 16, 16, 16, 16
]
point_color = [16, 16, 16, 16, 16, 9, 9, 9, 9, 9, 9, 0, 0, 0, 0, 0, 0]
scale = resize / max(frame.shape[0], frame.shape[1])
scores = keypoints[..., 2]
keypoints = (keypoints[..., :2] * scale).astype(int)
img = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
for kpts, score in zip(keypoints, scores):
show = [0] * len(kpts)
for (u, v), color in zip(skeleton, link_color):
if score[u] > thr and score[v] > thr:
cv2.line(img, kpts[u], tuple(kpts[v]), palette[color], 1,
cv2.LINE_AA)
show[u] = show[v] = 1
for kpt, show, color in zip(kpts, show, point_color):
if show:
cv2.circle(img, kpt, 1, palette[color], 2, cv2.LINE_AA)
cv2.imwrite(filename, img) | null |
188,869 | import argparse
import cv2
from mmdeploy_runtime import Classifier
def parse_args():
parser = argparse.ArgumentParser(
description='show how to use sdk python api')
parser.add_argument('device_name', help='name of device, cuda or cpu')
parser.add_argument(
'model_path',
help='path of mmdeploy SDK model dumped by model converter')
parser.add_argument('image_path', help='path of an image')
args = parser.parse_args()
return args | null |
188,870 | import argparse
import json
import cv2
from mmdeploy_runtime import Context, Device, Model, Pipeline
def parse_args():
parser = argparse.ArgumentParser(
description='Demo of MMDeploy SDK pipeline API')
parser.add_argument('device', help='name of device, cuda or cpu')
parser.add_argument('det_model_path', help='path of detection model')
parser.add_argument('cls_model_path', help='path of classification model')
parser.add_argument('image_path', help='path to test image')
args = parser.parse_args()
return args | null |
188,871 | import grpc
import inference_pb2 as inference__pb2
class Inference(object):
"""The inference service definition."""
def Echo(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request, target, '/mmdeploy.Inference/Echo',
inference__pb2.Empty.SerializeToString,
inference__pb2.Reply.FromString, options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout,
metadata)
def Init(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request, target, '/mmdeploy.Inference/Init',
inference__pb2.Model.SerializeToString,
inference__pb2.Reply.FromString, options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout,
metadata)
def OutputNames(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request, target, '/mmdeploy.Inference/OutputNames',
inference__pb2.Empty.SerializeToString,
inference__pb2.Names.FromString, options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout,
metadata)
def Inference(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request, target, '/mmdeploy.Inference/Inference',
inference__pb2.TensorList.SerializeToString,
inference__pb2.Reply.FromString, options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout,
metadata)
def Destroy(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request, target, '/mmdeploy.Inference/Destroy',
inference__pb2.Empty.SerializeToString,
inference__pb2.Reply.FromString, options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout,
metadata)
def add_InferenceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Echo':
grpc.unary_unary_rpc_method_handler(
servicer.Echo,
request_deserializer=inference__pb2.Empty.FromString,
response_serializer=inference__pb2.Reply.SerializeToString,
),
'Init':
grpc.unary_unary_rpc_method_handler(
servicer.Init,
request_deserializer=inference__pb2.Model.FromString,
response_serializer=inference__pb2.Reply.SerializeToString,
),
'OutputNames':
grpc.unary_unary_rpc_method_handler(
servicer.OutputNames,
request_deserializer=inference__pb2.Empty.FromString,
response_serializer=inference__pb2.Names.SerializeToString,
),
'Inference':
grpc.unary_unary_rpc_method_handler(
servicer.Inference,
request_deserializer=inference__pb2.TensorList.FromString,
response_serializer=inference__pb2.Reply.SerializeToString,
),
'Destroy':
grpc.unary_unary_rpc_method_handler(
servicer.Destroy,
request_deserializer=inference__pb2.Empty.FromString,
response_serializer=inference__pb2.Reply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'mmdeploy.Inference', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler, )) | null |
188,872 | import argparse
import logging
from mmdeploy.backend.tensorrt import from_onnx
from mmdeploy.backend.tensorrt.utils import get_trt_log_level
from mmdeploy.utils import (get_common_config, get_model_inputs,
get_root_logger, load_config)
def parse_args():
parser = argparse.ArgumentParser(description='Convert ONNX to TensorRT.')
parser.add_argument('deploy_cfg', help='deploy config path')
parser.add_argument('onnx_path', help='ONNX model path')
parser.add_argument('output_prefix', help='output TensorRT engine prefix')
parser.add_argument('--device-id', help='the CUDA device id', default=0)
parser.add_argument(
'--calib-file',
help='the calibration data used to calibrate the engine to int8',
default=None)
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
args = parser.parse_args()
return args | null |
188,873 | import argparse
import numpy as np
from texttable import Texttable
def parse_args():
parser = argparse.ArgumentParser(
description='Analyze sdk profiler file tool.')
parser.add_argument('profile_file', help='SDK profile file path')
args = parser.parse_args()
return args | null |
188,874 | import argparse
import numpy as np
from texttable import Texttable
def get_name(addr, prev, addr2name, used_addr, depth, skip):
node_name = addr2name[addr] if not skip else ''
if addr not in prev:
return ' ' * depth * 4 + node_name
prev_addr = prev[addr]
if prev_addr in used_addr:
depth += 1
skip = True
prev_name = get_name(prev[addr], prev, addr2name, used_addr, depth, skip)
if len(prev_name.split()) == 0:
return prev_name + node_name
return prev_name + '/' + node_name | null |
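A small self-contained example (addresses and names are made up) of how `get_name` walks the `prev` chain and joins node names into a path:
addr2name = {0x10: 'net', 0x20: 'backbone', 0x30: 'conv1'}
prev = {0x20: 0x10, 0x30: 0x20}
# walks conv1 -> backbone -> net and joins the names from the root down
print(get_name(0x30, prev, addr2name, used_addr=set(), depth=0, skip=False))
# -> 'net/backbone/conv1'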
188,875 | import argparse
import os
import os.path as osp
import yaml
from mmengine import Config
from mmdeploy.utils import get_backend, get_task_type, load_config
def parse_args():
parser = argparse.ArgumentParser(
description='from yaml export markdown table')
parser.add_argument('yml_file', help='input yml config path')
parser.add_argument('output', help='output markdown file path')
parser.add_argument(
'--backends',
nargs='+',
help='backends you want to generate',
default=[
'onnxruntime', 'tensorrt', 'torchscript', 'pplnn', 'openvino',
'ncnn'
])
args = parser.parse_args()
return args | null |
188,876 | import argparse
import logging
from copy import deepcopy
from mmengine import Config
from torch.utils.data import DataLoader
from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import get_root_logger, load_config
class QuantizationImageDataset(Dataset):
def __init__(
self,
path: str,
deploy_cfg: Config,
model_cfg: Config,
file_client_args: Optional[dict] = None,
extensions: Sequence[str] = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp',
'.pgm', '.tif'),
):
super().__init__()
task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')
self.task_processor = task_processor
self.samples = []
self.extensions = tuple(set([i.lower() for i in extensions]))
self.file_client = FileClient.infer_client(file_client_args, path)
self.path = path
assert self.file_client.isdir(path)
files = list(
self.file_client.list_dir_or_file(
path,
list_dir=False,
list_file=True,
recursive=False,
))
for file in files:
if self.is_valid_file(self.file_client.join_path(file)):
path = self.file_client.join_path(self.path, file)
self.samples.append(path)
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
sample = self.samples[index]
image = mmcv.imread(sample)
data = self.task_processor.create_input(image)
return data[0]
def is_valid_file(self, filename: str) -> bool:
"""Check if a file is a valid sample."""
return filename.lower().endswith(self.extensions)
def get_table(onnx_path: str,
deploy_cfg: Config,
model_cfg: Config,
output_onnx_path: str,
output_quant_table_path: str,
image_dir: str = None,
device: str = 'cuda',
dataset_type: str = 'val'):
input_shape = None
# setup input_shape if existed in `onnx_config`
if 'onnx_config' in deploy_cfg and 'input_shape' in deploy_cfg.onnx_config:
input_shape = deploy_cfg.onnx_config.input_shape
task_processor = build_task_processor(model_cfg, deploy_cfg, device)
calib_dataloader = deepcopy(model_cfg[f'{dataset_type}_dataloader'])
calib_dataloader['batch_size'] = 1
# build calibration dataloader. If img dir not specified, use val dataset.
if image_dir is not None:
from quant_image_dataset import QuantizationImageDataset
dataset = QuantizationImageDataset(
path=image_dir, deploy_cfg=deploy_cfg, model_cfg=model_cfg)
def collate(data_batch):
return data_batch[0]
dataloader = DataLoader(dataset, batch_size=1, collate_fn=collate)
else:
dataset = task_processor.build_dataset(calib_dataloader['dataset'])
calib_dataloader['dataset'] = dataset
dataloader = task_processor.build_dataloader(calib_dataloader)
data_preprocessor = task_processor.build_data_preprocessor()
# get an available input shape randomly
for _, input_data in enumerate(dataloader):
input_data = data_preprocessor(input_data)
input_tensor = input_data['inputs']
input_shape = input_tensor.shape
collate_fn = lambda x: data_preprocessor(x)['inputs'].to( # noqa: E731
device)
from ppq import QuantizationSettingFactory, TargetPlatform
from ppq.api import export_ppq_graph, quantize_onnx_model
# settings for ncnn quantization
quant_setting = QuantizationSettingFactory.default_setting()
quant_setting.equalization = False
quant_setting.dispatcher = 'conservative'
# quantize the model
quantized = quantize_onnx_model(
onnx_import_file=onnx_path,
calib_dataloader=dataloader,
calib_steps=max(8, min(512, len(dataset))),
input_shape=input_shape,
setting=quant_setting,
collate_fn=collate_fn,
platform=TargetPlatform.NCNN_INT8,
device=device,
verbose=1)
# export quantized graph and quant table
export_ppq_graph(
graph=quantized,
platform=TargetPlatform.NCNN_INT8,
graph_save_to=output_onnx_path,
config_save_to=output_quant_table_path)
return | null |
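An illustrative call; every path below is hypothetical, and `image_dir` may be omitted to fall back to the val dataset from the model config:
deploy_cfg, model_cfg = load_config('detection_ncnn-int8_static.py', 'yolov3_cfg.py')
get_table(
    onnx_path='end2end.onnx',
    deploy_cfg=deploy_cfg,
    model_cfg=model_cfg,
    output_onnx_path='end2end_quant.onnx',
    output_quant_table_path='end2end.table',
    image_dir='calib_images',
    device='cuda')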
188,877 | import argparse
import logging
from copy import deepcopy
from mmengine import Config
from torch.utils.data import DataLoader
from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import get_root_logger, load_config
def parse_args():
parser = argparse.ArgumentParser(
description='Generate ncnn quant table from ONNX.')
parser.add_argument('--onnx', help='ONNX model path')
parser.add_argument('--deploy-cfg', help='Input deploy config path')
parser.add_argument('--model-cfg', help='Input model config path')
parser.add_argument('--out-onnx', help='Output onnx path')
parser.add_argument('--out-table', help='Output quant table path')
parser.add_argument(
'--image-dir',
type=str,
default=None,
help='Calibration Image Directory.')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
args = parser.parse_args()
return args | null |
188,878 | import argparse
import logging
from mmdeploy.apis.ncnn import from_onnx
from mmdeploy.utils import get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Convert ONNX to ncnn.')
parser.add_argument('onnx_path', help='ONNX model path')
parser.add_argument('output_prefix', help='output ncnn model path')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
args = parser.parse_args()
return args | null |
188,879 | import argparse
import logging
from mmdeploy.apis.snpe import from_onnx
from mmdeploy.utils import get_root_logger
def parse_args():
parser = argparse.ArgumentParser(
description='Convert ONNX to snpe dlc format.')
parser.add_argument('onnx_path', help='ONNX model path')
parser.add_argument('output_prefix', help='output snpe dlc model path')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
args = parser.parse_args()
return args | null |
188,880 | from mmcv.utils import collect_env as collect_base_env
from mmengine.utils import get_git_hash
import mmdeploy
from mmdeploy.utils import get_codebase_version, get_root_logger
The provided code snippet includes necessary dependencies for implementing the `collect_env` function. Write a Python function `def collect_env()` to solve the following problem:
Collect the information of the running environments.
Here is the function:
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMDeploy'] = f'{mmdeploy.__version__}+{get_git_hash()[:7]}'
return env_info | Collect the information of the running environments. |
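A quick way to use it is to print the collected key/value pairs, which is essentially what the environment-check tool logs:
for name, value in collect_env().items():
    print(f'{name}: {value}')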
188,881 | from mmcv.utils import collect_env as collect_base_env
from mmengine.utils import get_git_hash
import mmdeploy
from mmdeploy.utils import get_codebase_version, get_root_logger
def check_backend():
from mmdeploy.backend.base import get_backend_manager
from mmdeploy.utils import Backend
exclude_backend_lists = [Backend.DEFAULT, Backend.PYTORCH, Backend.SDK]
backend_lists = [
backend for backend in Backend if backend not in exclude_backend_lists
]
for backend in backend_lists:
backend_mgr = get_backend_manager(backend.value)
backend_mgr.check_env(logger.info) | null |
188,882 | from mmcv.utils import collect_env as collect_base_env
from mmengine.utils import get_git_hash
import mmdeploy
from mmdeploy.utils import get_codebase_version, get_root_logger
def check_codebase():
codebase_versions = get_codebase_version()
for k, v in codebase_versions.items():
logger.info(f'{k}:\t{v}') | null |
188,883 | import argparse
import logging
import os
import os.path as osp
from functools import partial
import mmengine
import torch.multiprocessing as mp
from torch.multiprocessing import Process, set_start_method
from mmdeploy.apis import (create_calib_input_data, extract_model,
get_predefined_partition_cfg, torch2onnx,
torch2torchscript, visualize_model)
from mmdeploy.apis.core import PIPELINE_MANAGER
from mmdeploy.apis.utils import to_backend
from mmdeploy.backend.sdk.export_info import export2SDK
from mmdeploy.utils import (IR, Backend, get_backend, get_calib_filename,
get_ir_config, get_partition_config,
get_root_logger, load_config, target_wrapper)
def parse_args():
parser = argparse.ArgumentParser(description='Export model to backends.')
parser.add_argument('deploy_cfg', help='deploy config path')
parser.add_argument('model_cfg', help='model config path')
parser.add_argument('checkpoint', help='model checkpoint path')
    parser.add_argument('img', help='image used to convert the model')
parser.add_argument(
'--test-img',
default=None,
type=str,
nargs='+',
help='image used to test model')
parser.add_argument(
'--work-dir',
default=os.getcwd(),
help='the dir to save logs and models')
parser.add_argument(
'--calib-dataset-cfg',
help='dataset config path used to calibrate in int8 mode. If not \
specified, it will use "val" dataset in model config instead.',
default=None)
parser.add_argument(
'--device', help='device used for conversion', default='cpu')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
parser.add_argument(
'--show', action='store_true', help='Show detection outputs')
parser.add_argument(
'--dump-info', action='store_true', help='Output information for SDK')
parser.add_argument(
'--quant-image-dir',
default=None,
help='Image directory for quantize model.')
parser.add_argument(
'--quant', action='store_true', help='Quantize model to low bit.')
parser.add_argument(
'--uri',
default='192.168.1.1:60000',
help='Remote ipv4:port or ipv6:port for inference on edge device.')
args = parser.parse_args()
return args | null |
188,884 | import argparse
import logging
import os
import os.path as osp
from functools import partial
import mmengine
import torch.multiprocessing as mp
from torch.multiprocessing import Process, set_start_method
from mmdeploy.apis import (create_calib_input_data, extract_model,
get_predefined_partition_cfg, torch2onnx,
torch2torchscript, visualize_model)
from mmdeploy.apis.core import PIPELINE_MANAGER
from mmdeploy.apis.utils import to_backend
from mmdeploy.backend.sdk.export_info import export2SDK
from mmdeploy.utils import (IR, Backend, get_backend, get_calib_filename,
get_ir_config, get_partition_config,
get_root_logger, load_config, target_wrapper)
def create_process(name, target, args, kwargs, ret_value=None):
logger = get_root_logger()
logger.info(f'{name} start.')
log_level = logger.level
wrap_func = partial(target_wrapper, target, log_level, ret_value)
process = Process(target=wrap_func, args=args, kwargs=kwargs)
process.start()
process.join()
if ret_value is not None:
if ret_value.value != 0:
logger.error(f'{name} failed.')
exit(1)
else:
logger.info(f'{name} success.') | null |
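A toy sketch of calling it (the target function here is made up), assuming `target_wrapper` forwards the positional and keyword arguments to the target and stores a non-zero code in `ret_value` on failure:
def _dummy_export(path, opset=11):
    print(f'exporting to {path} with opset {opset}')
ret_value = mp.Value('d', 0, lock=False)
create_process(
    'dummy export',
    target=_dummy_export,
    args=('tmp.onnx', ),
    kwargs=dict(opset=11),
    ret_value=ret_value)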
188,885 | import argparse
import logging
import os
import os.path as osp
from functools import partial
import mmengine
import torch.multiprocessing as mp
from torch.multiprocessing import Process, set_start_method
from mmdeploy.apis import (create_calib_input_data, extract_model,
get_predefined_partition_cfg, torch2onnx,
torch2torchscript, visualize_model)
from mmdeploy.apis.core import PIPELINE_MANAGER
from mmdeploy.apis.utils import to_backend
from mmdeploy.backend.sdk.export_info import export2SDK
from mmdeploy.utils import (IR, Backend, get_backend, get_calib_filename,
get_ir_config, get_partition_config,
get_root_logger, load_config, target_wrapper)
The provided code snippet includes necessary dependencies for implementing the `torch2ir` function. Write a Python function `def torch2ir(ir_type: IR)` to solve the following problem:
Return the conversion function from torch to the intermediate representation. Args: ir_type (IR): The type of the intermediate representation.
Here is the function:
def torch2ir(ir_type: IR):
"""Return the conversion function from torch to the intermediate
representation.
Args:
ir_type (IR): The type of the intermediate representation.
"""
if ir_type == IR.ONNX:
return torch2onnx
elif ir_type == IR.TORCHSCRIPT:
return torch2torchscript
else:
raise KeyError(f'Unexpected IR type {ir_type}') | Return the conversion function from torch to the intermediate representation. Args: ir_type (IR): The type of the intermediate representation. |
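For example, asking for the ONNX exporter just hands back the `torch2onnx` entry point:
export_fn = torch2ir(IR.ONNX)
assert export_fn is torch2onnx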
188,886 | import argparse
import logging
import os
import os.path as osp
from mmdeploy.apis import (extract_model, get_predefined_partition_cfg,
torch2onnx)
from mmdeploy.utils import (get_ir_config, get_partition_config,
get_root_logger, load_config)
def parse_args():
parser = argparse.ArgumentParser(description='Export model to ONNX.')
parser.add_argument('deploy_cfg', help='deploy config path')
parser.add_argument('model_cfg', help='model config path')
parser.add_argument('checkpoint', help='model checkpoint path')
    parser.add_argument('img', help='image used to convert the model')
parser.add_argument(
'--work-dir',
default='./work-dir',
help='Directory to save output files.')
parser.add_argument(
'--device', help='device used for conversion', default='cpu')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
args = parser.parse_args()
return args | null |
188,887 | import argparse
import logging
import os.path as osp
from copy import deepcopy
from typing import Optional, Sequence
import h5py
import tqdm
from mmengine import Config
from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import get_root_logger, load_config
def get_tensor_func(model, input_data):
input_data = model.data_preprocessor(input_data)
return input_data['inputs']
def process_model_config(model_cfg: Config,
input_shape: Optional[Sequence[int]] = None):
"""Process the model config.
Args:
model_cfg (Config): The model config.
input_shape (list[int]): A list of two integer in (width, height)
format specifying input shape. Default: None.
Returns:
Config: the model config after processing.
"""
cfg = model_cfg.copy()
pipeline = cfg.test_pipeline
for i, transform in enumerate(pipeline):
# for static exporting
if transform.type == 'Resize':
pipeline[i].keep_ratio = False
pipeline[i].scale = tuple(input_shape)
if transform.type in ('YOLOv5KeepRatioResize', 'LetterResize'):
pipeline[i].scale = tuple(input_shape)
pipeline = [
transform for transform in pipeline
if transform.type != 'LoadAnnotations'
]
cfg.test_pipeline = pipeline
return cfg
def get_quant(deploy_cfg: Config,
model_cfg: Config,
shape_dict: dict,
checkpoint_path: str,
work_dir: str,
device: str = 'cpu',
dataset_type: str = 'val'):
model_shape = list(shape_dict.values())[0]
model_cfg = process_model_config(model_cfg,
(model_shape[3], model_shape[2]))
task_processor = build_task_processor(model_cfg, deploy_cfg, device)
model = task_processor.build_pytorch_model(checkpoint_path)
calib_dataloader = deepcopy(model_cfg[f'{dataset_type}_dataloader'])
calib_dataloader['batch_size'] = 1
dataloader = task_processor.build_dataloader(calib_dataloader)
output_quant_dataset_path = osp.join(work_dir, 'calib_data.h5')
with h5py.File(output_quant_dataset_path, mode='w') as file:
calib_data_group = file.create_group('calib_data')
input_data_group = calib_data_group.create_group('input')
# get an available input shape randomly
for data_id, input_data in enumerate(tqdm.tqdm(dataloader)):
# input_data = data_preprocessor(input_data)['inputs'].numpy()
input_data = get_tensor_func(model, input_data).numpy()
calib_data_shape = input_data.shape
assert model_shape[2] >= calib_data_shape[2] and model_shape[
3] >= calib_data_shape[
3], f'vacc backend model shape is {tuple(model_shape[2:])}, \
the calib_data shape {calib_data_shape[2:]} is bigger'
input_data_group.create_dataset(
str(data_id),
shape=input_data.shape,
compression='gzip',
compression_opts=4,
data=input_data) | null |
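The written file can be sanity-checked with h5py; each calibration sample is a gzip-compressed dataset keyed by its index under `calib_data/input` (the path below is illustrative):
import h5py
with h5py.File('calib_data.h5', mode='r') as f:
    inputs = f['calib_data']['input']
    print(f'{len(inputs)} calibration samples')
    print('first sample shape:', inputs['0'].shape)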
188,888 | import argparse
import logging
import os.path as osp
from copy import deepcopy
from typing import Optional, Sequence
import h5py
import tqdm
from mmengine import Config
from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import get_root_logger, load_config
def parse_args():
parser = argparse.ArgumentParser(
description='Generate vacc quant dataset from ONNX.')
parser.add_argument('--deploy-cfg', help='Input deploy config path')
parser.add_argument('--model-cfg', help='Input model config path')
parser.add_argument('--shape-dict', help='Input model shape')
parser.add_argument('--checkpoint-path', help='checkpoint path')
parser.add_argument('--work-dir', help='Output quant dataset dir')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
args = parser.parse_args()
return args | null |
188,889 | import argparse
import logging
import os.path as osp
import onnx
import onnx.helper
from mmdeploy.apis.onnx import extract_partition
from mmdeploy.utils import get_root_logger
def parse_args():
parser = argparse.ArgumentParser(
description='Extract model based on markers.')
parser.add_argument('input_model', help='Input ONNX model')
parser.add_argument('output_model', help='Output ONNX model')
parser.add_argument(
'--start',
help='Start markers, format: func:type, e.g. backbone:input')
parser.add_argument('--end', help='End markers')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
args = parser.parse_args()
args.start = args.start.split(',') if args.start else []
args.end = args.end.split(',') if args.end else []
return args | null |
188,890 | import argparse
import logging
import os.path as osp
import onnx
import onnx.helper
from mmdeploy.apis.onnx import extract_partition
from mmdeploy.utils import get_root_logger
def collect_avaiable_marks(model):
marks = []
for node in model.graph.node:
if node.op_type == 'Mark':
for attr in node.attribute:
if attr.name == 'func':
func = str(onnx.helper.get_attribute_value(attr), 'utf-8')
if func not in marks:
marks.append(func)
return marks | null |
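A tiny example of what it extracts: two `Mark` nodes tagged with the same `func` yield a single entry (the string attribute comes back as bytes, hence the utf-8 decode above):
node0 = onnx.helper.make_node('Mark', ['x'], ['y'], func='backbone', type='input')
node1 = onnx.helper.make_node('Mark', ['y'], ['z'], func='backbone', type='output')
graph = onnx.helper.make_graph([node0, node1], 'toy', inputs=[], outputs=[])
model = onnx.helper.make_model(graph)
print(collect_avaiable_marks(model))  # ['backbone']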
188,891 | import argparse
import os
import os.path as osp
import pathlib
import shutil
import subprocess
from glob import glob
import mmcv
import yaml
from mmdeploy.backend.sdk.export_info import (get_preprocess,
get_transform_static)
from mmdeploy.utils import get_root_logger, load_config
def parse_args():
parser = argparse.ArgumentParser(description='Extract transform.')
parser.add_argument(
        'root_path', help='parent path to codebase (mmdetection, for example)')
args = parser.parse_args()
return args | null |
188,892 | import argparse
import os
import os.path as osp
import pathlib
import shutil
import subprocess
from glob import glob
import mmcv
import yaml
from mmdeploy.backend.sdk.export_info import (get_preprocess,
get_transform_static)
from mmdeploy.utils import get_root_logger, load_config
MMDEPLOY_PATH = pathlib.Path(__file__).parent.parent.parent.resolve()
DEPLOY_CFG = {
'Image Classification': 'configs/mmpretrain/classification_tensorrt_dynamic-224x224-224x224.py',
'Object Detection': 'configs/mmdet/detection/detection_tensorrt_static-800x1344.py',
'Instance Segmentation': 'configs/mmdet/instance-seg/instance-seg_tensorrt_static-800x1344.py',
'Semantic Segmentation': 'configs/mmseg/segmentation_tensorrt_static-512x512.py',
'Oriented Object Detection': 'configs/mmrotate/rotated-detection_tensorrt-fp16_dynamic-320x320-1024x1024.py',
'Text Recognition': 'configs/mmocr/text-recognition/text-recognition_tensorrt_static-32x32.py',
'Text Detection': 'configs/mmocr/text-detection/text-detection_tensorrt_static-512x512.py',
'Restorers': 'configs/mmagic/super-resolution/super-resolution_tensorrt_static-256x256.py'
}
def extract_one_model(deploy_cfg_, model_cfg_, args):
deploy_cfg, model_cfg = load_config(deploy_cfg_, model_cfg_)
preprocess = get_preprocess(deploy_cfg, model_cfg, 'cuda')
preprocess['model_cfg'] = model_cfg_
transform_static, tag = get_transform_static(preprocess['transforms'])
if tag is not None:
generate_source_code(preprocess, transform_static, tag, args)
def extract_one_metafile(metafile, codebase, args):
with open(metafile, encoding='utf-8') as f:
yaml_info = yaml.load(f, Loader=yaml.FullLoader)
known_task = list(DEPLOY_CFG.keys())
for model in yaml_info['Models']:
try:
cfg = model['Config']
task_name = model['Results'][0]['Task']
if task_name not in known_task:
continue
deploy_cfg = osp.join(MMDEPLOY_PATH, DEPLOY_CFG[task_name])
model_cfg = osp.join(args.root_path, codebase, cfg)
extract_one_model(deploy_cfg, model_cfg, args)
except Exception:
pass | null |
188,893 | import argparse
import collections
import logging
from mmdeploy.apis.pplnn import from_onnx
from mmdeploy.utils import get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Convert ONNX to PPLNN.')
parser.add_argument('onnx_path', help='ONNX model path')
parser.add_argument(
'output_prefix', help='output PPLNN algorithm prefix in json format')
parser.add_argument(
'--device',
        help='the device of the model during conversion',
default='cuda:0')
parser.add_argument(
'--opt-shapes',
        help='Optimal shapes for PPLNN optimization. The shapes must be able '
        'to be evaluated by python, e.g., `[1, 3, 224, 224]`',
default='[1, 3, 224, 224]')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
args = parser.parse_args()
return args | null |
188,894 | import argparse
import glob
import os.path as osp
import numpy as np
import torch
from mmengine import DictAction
from prettytable import PrettyTable
from mmdeploy.apis import build_task_processor
from mmdeploy.utils import get_root_logger
from mmdeploy.utils.config_utils import (Backend, get_backend, get_input_shape,
load_config)
from mmdeploy.utils.timer import TimeCounter
def parse_args():
parser = argparse.ArgumentParser(
description='MMDeploy Model Latency Test Tool.')
parser.add_argument('deploy_cfg', help='Deploy config path')
parser.add_argument('model_cfg', help='Model config path')
parser.add_argument('image_dir', help='Input directory to image files')
parser.add_argument(
'--model', type=str, nargs='+', help='Input model files.')
parser.add_argument(
'--device', help='device type for inference', default='cuda:0')
parser.add_argument(
'--shape',
type=str,
help='Input shape to test in `HxW` format, e.g., `800x1344`',
default=None)
parser.add_argument(
'--warmup',
type=int,
help='warmup iterations before counting inference latency.',
default=10)
parser.add_argument(
'--num-iter',
type=int,
help='Number of iterations to run the inference.',
default=100)
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--batch-size', type=int, default=1, help='the batch size for test.')
parser.add_argument(
'--img-ext',
type=str,
nargs='+',
help='the file extensions for input images from `image_dir`.',
default=['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif'])
args = parser.parse_args()
return args | null |
188,895 | import argparse
import glob
import os.path as osp
import numpy as np
import torch
from mmengine import DictAction
from prettytable import PrettyTable
from mmdeploy.apis import build_task_processor
from mmdeploy.utils import get_root_logger
from mmdeploy.utils.config_utils import (Backend, get_backend, get_input_shape,
load_config)
from mmdeploy.utils.timer import TimeCounter
def get_images(image_dir, extensions):
images = []
files = glob.glob(osp.join(image_dir, '**', '*'), recursive=True)
for f in files:
_, ext = osp.splitext(f)
if ext.lower() in extensions:
images.append(f)
return images | null |
188,896 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
def install_pplcv(dep_dir, build_cuda):
print('-' * 10 + 'install pplcv' + '-' * 10)
time.sleep(2)
os.chdir(dep_dir)
pplcv_dir = os.path.join(dep_dir, 'ppl.cv')
# git clone
if not os.path.exists(pplcv_dir):
os.system(
'git clone --depth 1 --branch v0.7.0 https://github.com/openppl-public/ppl.cv/' # noqa: E501
)
# build
os.chdir(pplcv_dir)
if build_cuda is True:
os.system('./build.sh cuda')
pplcv_cmake_dir = os.path.join(pplcv_dir,
'cuda-build/install/lib/cmake/ppl')
else:
os.system('./build.sh x86_64')
pplcv_cmake_dir = os.path.join(pplcv_dir,
'x86-64-build/install/lib/cmake/ppl')
print('\n')
return pplcv_cmake_dir | null |
188,897 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
def install_pplnn(dep_dir, build_cuda):
print('-' * 10 + 'install pplnn' + '-' * 10)
time.sleep(2)
# generate unzip and build dir
os.chdir(dep_dir)
pplnn_dir = os.path.join(dep_dir, 'ppl.nn')
# git clone
if not os.path.exists(pplnn_dir):
os.system(
'git clone --depth 1 --branch v0.8.2 https://github.com/openppl-public/ppl.nn/' # noqa: E501
)
# build
os.chdir(pplnn_dir)
if build_cuda is True:
os.system(
'./build.sh -DPPLNN_USE_CUDA=ON -DPPLNN_USE_X86_64=ON -DPPLNN_ENABLE_PYTHON_API=ON' # noqa: E501
)
else:
os.system(
'./build.sh -DPPLNN_USE_X86_64=ON -DPPLNN_ENABLE_PYTHON_API=ON' # noqa: E501
)
os.system('cd python/package && ./build.sh')
os.system(
'cd /tmp/pyppl-package/dist && python3 -m pip install pyppl*.whl --force-reinstall --user' # noqa: E501
)
pplnn_cmake_dir = os.path.join(pplnn_dir,
'pplnn-build/install/lib/cmake/ppl')
print('\n')
return pplnn_cmake_dir | null |
188,898 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
def install_mmdeploy(work_dir, pplnn_cmake_dir, pplcv_cmake_dir, build_cuda):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git submodule update')
if not os.path.exists('build'):
os.system('mkdir build')
os.system('rm -rf build/CMakeCache.txt')
cmd = 'cd build && cmake ..'
cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
cmd += ' -DMMDEPLOY_TARGET_BACKENDS=pplnn '
if build_cuda is True:
cmd += ' -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" '
else:
cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
cmd += ' -Dpplcv_DIR={} '.format(pplcv_cmake_dir)
cmd += ' -Dpplnn_DIR={} '.format(pplnn_cmake_dir)
os.system(cmd)
os.system('cd build && make -j {} && make install'.format(g_jobs))
os.system('python3 -m pip install -e .')
try:
import mmcv
print(mmcv.__version__)
os.system('python3 tools/check_env.py')
except Exception:
print('Please install torch & mmcv later.. ∩▽∩')
return 0 | null |
188,899 | import os
import os.path as osp
import sys
import time
from ubuntu_utils import cmd_result, ensure_base_env, get_job
def cmd_result(txt: str):
def install_llvm(dep_dir):
print('-' * 10 + 'install llvm' + '-' * 10)
os.chdir(dep_dir)
os.system(
'wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -' # noqa: E501
)
ubuntu = cmd_result(
""" lsb_release -a 2>/dev/null | grep "Release" | tail -n 1 | awk '{print $NF}' """ # noqa: E501
)
nickname_dict = {
'18.04': 'bionic',
'20.04': 'focal',
'22.04': 'jammy',
'22.10': 'kinetic'
}
nickname = nickname_dict.get(ubuntu, None)
if nickname is None:
raise NotImplementedError(f'Unsupported ubuntu version {ubuntu}.')
os.system(
f"add-apt-repository 'deb http://apt.llvm.org/{nickname}/ llvm-toolchain-{nickname}-10 main'" # noqa: E501
)
os.system('sudo apt update')
os.system(
'sudo apt-get install llvm-10 lldb-10 llvm-10-dev libllvm10 llvm-10-runtime' # noqa: E501
) | null |
188,900 | import os
import os.path as osp
import sys
import time
from ubuntu_utils import cmd_result, ensure_base_env, get_job
def install_tvm(dep_dir):
print('-' * 10 + 'build and install tvm' + '-' * 10)
time.sleep(2)
os.system('sudo apt-get update')
os.system(
'sudo apt-get install -y python3 python3-dev python3-setuptools gcc libtinfo-dev zlib1g-dev build-essential cmake libedit-dev libxml2-dev' # noqa: E501
)
# generate unzip and build dir
os.chdir(dep_dir)
# git clone
if not osp.exists('tvm'):
os.system(
'git clone --branch v0.10.0 --depth 1 --recursive https://github.com/apache/tvm tvm' # noqa: E501
)
tvm_dir = osp.join(dep_dir, 'tvm')
os.chdir(tvm_dir)
# build
if not osp.exists('build'):
os.system('mkdir build')
os.system('cp cmake/config.cmake build')
os.chdir(osp.join(tvm_dir, 'build'))
os.system(
""" sed -i "s@set(USE_LLVM OFF)@set(USE_LLVM /usr/bin/llvm-config-10)@g" config.cmake """ # noqa: E501
)
os.system('cmake .. && make -j {} && make runtime'.format(g_jobs))
# set env
os.system(
""" echo 'export LD_LIBRARY_PATH={}:$LD_LIBRARY_PATH' >> ~/mmdeploy.env """ # noqa: E501
.format(os.path.join(tvm_dir, 'build')))
# install python package
os.chdir(osp.join(tvm_dir, 'python'))
os.system(""" python3 setup.py install --user """)
# install dependency
os.system(
""" python3 -m pip install xgboost decorator psutil scipy attrs tornado """ # noqa: E501
)
return tvm_dir | null |
188,901 | import os
import os.path as osp
import sys
import time
from ubuntu_utils import cmd_result, ensure_base_env, get_job
def install_mmdeploy(work_dir, tvm_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git submodule update')
if not os.path.exists('build'):
os.system('mkdir build')
os.system('rm -rf build/CMakeCache.txt')
cmd = 'cd build && cmake ..'
cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
cmd += ' -DMMDEPLOY_TARGET_BACKENDS=tvm '
cmd += ' -DTVM_DIR={} '.format(tvm_dir)
os.system(cmd)
os.system('cd build && make -j {} && make install'.format(g_jobs))
os.system('python3 -m pip install -v -e .')
os.system(""" echo 'export PATH={}:$PATH' >> ~/mmdeploy.env """.format(
os.path.join(work_dir, 'mmdeploy', 'backend', 'tvm')))
try:
import mmcv
print(mmcv.__version__)
os.system('python3 tools/check_env.py')
except Exception:
print('Please install torch & mmcv later...')
return 0 | null |
188,902 | import os
import re
import time
def cmd_result(txt: str):
cmd = os.popen(txt)
return cmd.read().rstrip().lstrip()
def get_job(argv) -> int:
# get nprocs, if user not specified, use max(1, nproc-2)
job = 2
if len(argv) <= 1:
        print('you can use `python3 {} N` to set make -j [N]'.format(argv[0]))
nproc = cmd_result('nproc')
if nproc is not None and len(nproc) > 0:
job = max(int(nproc) - 2, 1)
else:
job = int(argv[1])
return job | null |
188,903 | import os
import re
import time
def version_minor(txt: str) -> int:
return int(txt.split('.')[1]) | null |
188,904 | import os
import re
import time
def cmd_result(txt: str):
def version_major(txt: str) -> int:
def simple_check_install(bin: str, sudo: str) -> str:
def ensure_base_env(work_dir, dep_dir):
description = """
check python, root, pytorch version, auto install these binaries:
* make
* g++
* git
* wget
* unzip
* opencv
* mmcv (not compulsory)
"""
print('-' * 10 + 'ensure base env' + '-' * 10)
print(description)
os.system('python3 -m ensurepip')
os.system('python3 -m pip install wheel')
sudo = 'sudo'
if 'root' in cmd_result('whoami'):
sudo = ''
# check ubuntu
ubuntu = cmd_result(
""" lsb_release -a 2>/dev/null | grep "Release" | tail -n 1 | awk '{print $NF}' """ # noqa: E501
)
# check cmake version
cmake = cmd_result('which cmake')
if cmake is None or len(cmake) < 1:
print('cmake not found, try install cmake ..', end='')
os.system('python3 -m pip install cmake')
cmake = cmd_result('which cmake')
if cmake is None or len(cmake) < 1:
env = 'export PATH=${PATH}:~/.local/bin'
os.system(env)
os.system(""" echo '{}' >> ~/mmdeploy.env """.format(env))
cmake = cmd_result('which cmake')
if cmake is None or len(cmake) < 1:
print('Check cmake failed.')
return -1
print('success')
# check make
make = cmd_result('which make')
if make is None or len(make) < 1:
print('make not found, try install make ..', end='')
os.system('{} apt update --fix-missing'.format(sudo))
os.system(
'{} DEBIAN_FRONTEND="noninteractive" apt install make'.format(
sudo))
make = cmd_result('which make')
if make is None or len(make) < 1:
print('Check make failed.')
return -1
print('success')
# check g++ version
gplus = cmd_result('which g++')
if gplus is None or len(gplus) < 1:
# install g++
print('g++ not found, try install g++ ..', end='')
os.system(
'{} DEBIAN_FRONTEND="noninteractive" apt install software-properties-common -y' # noqa: E501
.format(sudo)) # noqa: E501
os.system('{} apt update'.format(sudo))
if ubuntu is None or len(ubuntu) < 1 or version_major(ubuntu) <= 18:
os.system(
'{} add-apt-repository ppa:ubuntu-toolchain-r/test -y'.format(
sudo))
os.system('{} apt install gcc g++ -y'.format(sudo))
gplus = cmd_result('which g++')
if gplus is None or len(gplus) < 1:
print('Check g++ failed.')
return -1
print('success')
# wget
wget = simple_check_install('wget', sudo)
# check torch and mmcv, we try to install mmcv, it is not compulsory
mmcv_version = None
torch_version = None
try:
import torch
torch_version = torch.__version__
try:
import mmcv
mmcv_version = mmcv.__version__
except Exception:
# install mmcv
print('mmcv not found, try install mmcv ..', end='')
os.system('python3 -m pip install -U openmim')
os.system('mim install mmcv-full==1.5.1')
except Exception:
pass
# git
git = simple_check_install('git', sudo)
# unzip
unzip = simple_check_install('unzip', sudo)
# opencv
ocv = cmd_result('which opencv_version')
if ocv is None or len(ocv) < 1:
print('ocv not found, try install ocv ..', end='')
os.system('{} apt update'.format(sudo))
pattern = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+')
upstream = cmd_result('{} apt list libopencv-dev -a'.format(sudo))
add_ppa = True
if upstream is not None and len(upstream) > 0:
versions = pattern.findall(upstream)
if versions is not None and len(versions) > 0:
version = versions[0]
major = int(version.split('.')[0])
if major >= 3:
                    # Directly install upstream OCV, no need to add the ppa
add_ppa = False
if add_ppa:
os.system(
'{} add-apt-repository ppa:ignaciovizzo/opencv3-nonfree -y'.
format(sudo))
os.system(
'{} DEBIAN_FRONTEND="noninteractive" apt install libopencv-dev -y'
.format(sudo))
ocv = cmd_result('which opencv_version')
if ocv is None or len(ocv) < 1:
print('Check ocv failed.')
return -1
print('success')
# print all
print('ubuntu \t\t:{}'.format(ubuntu))
# check python
print('python bin\t:{}'.format(cmd_result('which python3')))
print('python version\t:{}'.format(
cmd_result("python3 --version | awk '{print $2}'")))
print('cmake bin\t:{}'.format(cmake))
print('cmake version\t:{}'.format(
cmd_result("cmake --version | head -n 1 | awk '{print $3}'")))
print('make bin\t:{}'.format(make))
print('make version\t:{}'.format(
cmd_result(" make --version | head -n 1 | awk '{print $3}' ")))
print('wget bin\t:{}'.format(wget))
print('g++ bin\t:{}'.format(gplus))
print('mmcv version\t:{}'.format(mmcv_version))
if mmcv_version is None:
print('\t please install mmcv later.')
time.sleep(2)
print('torch version\t:{}'.format(torch_version))
if torch_version is None:
print('\t please install pytorch later.')
time.sleep(2)
print('ocv version\t:{}'.format(cmd_result('opencv_version')))
print('git bin\t\t:{}'.format(git))
print('git version\t:{}'.format(
cmd_result("git --version | awk '{print $3}' ")))
print('unzip bin\t:{}'.format(unzip))
# work dir
print('work dir \t:{}'.format(work_dir))
# dep dir
print('dep dir \t:{}'.format(dep_dir))
print('\n')
return 0 | null |
188,905 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import ensure_base_env, get_job
def install_ort(dep_dir):
print('-' * 10 + 'install ort' + '-' * 10)
time.sleep(2)
# generate unzip and build dir
os.chdir(dep_dir)
# install python onnxruntime
os.system('python3 -m pip install onnxruntime==1.8.1')
# git clone
if not os.path.exists('onnxruntime-linux-x64-1.8.1'):
os.system(
'wget -q --show-progress https://github.com/microsoft/onnxruntime/releases/download/v1.8.1/onnxruntime-linux-x64-1.8.1.tgz' # noqa: E501
)
os.system('tar xvf onnxruntime-linux-x64-1.8.1.tgz')
ort_dir = os.path.join(dep_dir, 'onnxruntime-linux-x64-1.8.1')
print('onnxruntime dir \t:{}'.format(ort_dir))
print('\n')
return ort_dir | null |
188,906 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import ensure_base_env, get_job
g_jobs = 2
def install_mmdeploy(work_dir, ort_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git submodule update')
if not os.path.exists('build'):
os.system('mkdir build')
os.system('rm -rf build/CMakeCache.txt')
cmd = 'cd build && cmake ..'
cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
cmd += ' -DMMDEPLOY_TARGET_BACKENDS=ort '
cmd += ' -DONNXRUNTIME_DIR={} '.format(ort_dir)
os.system(cmd)
os.system('cd build && make -j {} && make install'.format(g_jobs))
os.system('python3 -m pip install -e .')
try:
import mmcv
print(mmcv.__version__)
os.system('python3 tools/check_env.py')
except Exception:
print('Please install torch & mmcv later.. ⊙▽⊙')
return 0 | null |
188,907 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import (cmd_result, cu_version_name, ensure_base_env,
get_job, pytorch_version)
def pytorch_version():
def cmd_result(txt: str):
def cu_version_name(version: str) -> str:
def install_libtorch(dep_dir):
print('-' * 10 + 'install libtorch' + '-' * 10)
time.sleep(2)
os.chdir(dep_dir)
unzipped_name = 'libtorch'
if os.path.exists(unzipped_name):
return os.path.join(dep_dir, unzipped_name)
torch_version = pytorch_version()
if torch_version is None:
print('torch version is None, try 1.11.0')
torch_version = '1.11.0'
version_name = None
# first check `nvcc` version, if failed, use `nvidia-smi`
cuda = cmd_result(
" nvcc --version | grep release | awk '{print $5}' | awk -F , '{print $1}' " # noqa: E501
)
if cuda is None or len(cuda) < 1:
cuda = cmd_result(" nvidia-smi | grep CUDA | awk '{print $9}' ")
if cuda is not None and len(cuda) > 0:
version_name = cu_version_name(cuda)
else:
version_name = 'cpu'
filename = 'libtorch-shared-with-deps-{}%2B{}.zip'.format(
torch_version, version_name)
url = 'https://download.pytorch.org/libtorch/{}/{}'.format(
version_name, filename)
os.system('wget -q --show-progress {} -O libtorch.zip'.format(url))
os.system('unzip libtorch.zip')
if not os.path.exists(unzipped_name):
print(
'download or unzip libtorch from {} failed, please check https://pytorch.org/get-started/locally/' # noqa: E501
.format(url))
return None
return os.path.join(dep_dir, unzipped_name) | null |
188,908 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import (cmd_result, cu_version_name, ensure_base_env,
get_job, pytorch_version)
g_jobs = 2
def install_mmdeploy(work_dir, libtorch_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git submodule update')
if not os.path.exists('build'):
os.system('mkdir build')
os.system('rm -rf build/CMakeCache.txt')
cmd = 'cd build && Torch_DIR={} cmake ..'.format(libtorch_dir)
cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
cmd += ' -DMMDEPLOY_TARGET_BACKENDS=torchscript '
cmd += ' -DTORCHSCRIPT_DIR={} '.format(libtorch_dir)
os.system(cmd)
os.system('cd build && make -j {} && make install'.format(g_jobs))
os.system('python3 -m pip install -e .')
try:
import mmcv
print(mmcv.__version__)
os.system('python3 tools/check_env.py')
except Exception:
print('Please install torch & mmcv later.. ≥▽≤')
return 0 | null |
188,909 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
def cmd_result(txt: str):
cmd = os.popen(txt)
return cmd.read().rstrip().lstrip()
The provided code snippet includes necessary dependencies for implementing the `install_protobuf` function. Write a Python function `def install_protobuf(dep_dir) -> int` to solve the following problem:
build and install protobuf; protobuf does not seem to support repeated installs, so clean the build first. Args: dep_dir (str): directory to download and build protobuf in. Returns: int: 0 on success.
Here is the function:
def install_protobuf(dep_dir) -> int:
"""build and install protobuf. protobuf seems not support repeated install,
so clean build first.
Args:
wor_dir (_type_): _description_
Returns:
: _description_
"""
print('-' * 10 + 'install protobuf' + '-' * 10)
os.chdir(dep_dir)
if not os.path.exists('protobuf-3.20.0'):
os.system(
'wget https://github.com/protocolbuffers/protobuf/releases/download/v3.20.0/protobuf-cpp-3.20.0.tar.gz' # noqa: E501
)
os.system('tar xvf protobuf-cpp-3.20.0.tar.gz')
os.chdir(os.path.join(dep_dir, 'protobuf-3.20.0'))
install_dir = os.path.join(dep_dir, 'pbinstall')
if os.path.exists(install_dir):
os.system('rm -rf {}'.format(install_dir))
os.system('make clean')
os.system('./configure --prefix={}'.format(install_dir))
os.system('make -j {} && make install'.format(g_jobs))
protoc = os.path.join(install_dir, 'bin', 'protoc')
print('protoc \t:{}'.format(cmd_result('{} --version'.format(protoc))))
os.system(""" echo 'export PATH={}:$PATH' >> ~/mmdeploy.env """.format(
os.path.join(install_dir, 'bin')))
os.system(
""" echo 'export LD_LIBRARY_PATH={}:$LD_LIBRARY_PATH' >> ~/mmdeploy.env """ # noqa: E501
.format(os.path.join(install_dir, 'lib')))
return 0 | build and install protobuf. protobuf seems not support repeated install, so clean build first. Args: wor_dir (_type_): _description_ Returns: : _description_ |
188,910 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
def install_pyncnn(dep_dir):
print('-' * 10 + 'build and install pyncnn' + '-' * 10)
time.sleep(2)
# generate unzip and build dir
os.chdir(dep_dir)
# git clone
if not os.path.exists('ncnn'):
os.system(
'git clone --depth 1 --branch 20230816 https://github.com/tencent/ncnn && cd ncnn' # noqa: E501
)
ncnn_dir = os.path.join(dep_dir, 'ncnn')
os.chdir(ncnn_dir)
# update submodule pybind11, gslang not required
os.system('git submodule init && git submodule update python/pybind11')
# build
if not os.path.exists('build'):
os.system('mkdir build')
os.chdir(os.path.join(ncnn_dir, 'build'))
os.system('rm -rf CMakeCache.txt')
pb_install = os.path.join(dep_dir, 'pbinstall')
pb_bin = os.path.join(pb_install, 'bin', 'protoc')
pb_lib = os.path.join(pb_install, 'lib', 'libprotobuf.so')
pb_include = os.path.join(pb_install, 'include')
cmd = 'cmake .. '
cmd += ' -DNCNN_PYTHON=ON '
cmd += ' -DProtobuf_LIBRARIES={} '.format(pb_lib)
cmd += ' -DProtobuf_PROTOC_EXECUTABLE={} '.format(pb_bin)
cmd += ' -DProtobuf_INCLUDE_DIR={} '.format(pb_include)
cmd += ' && make -j {} '.format(g_jobs)
cmd += ' && make install '
os.system(cmd)
# install
os.chdir(ncnn_dir)
os.system('cd python && python -m pip install -e . --user --no-cache-dir')
ncnn_cmake_dir = os.path.join(ncnn_dir, 'build', 'install', 'lib', 'cmake',
'ncnn')
assert (os.path.exists(ncnn_cmake_dir))
print('ncnn cmake dir \t:{}'.format(ncnn_cmake_dir))
print('\n')
return ncnn_cmake_dir | null |
188,911 | import os
import sys
import time
from pathlib import Path
from ubuntu_utils import cmd_result, ensure_base_env, get_job
g_jobs = 2
def install_mmdeploy(work_dir, dep_dir, ncnn_cmake_dir):
print('-' * 10 + 'build and install mmdeploy' + '-' * 10)
time.sleep(3)
os.chdir(work_dir)
os.system('git submodule init')
os.system('git submodule update')
if not os.path.exists('build'):
os.system('mkdir build')
pb_install = os.path.join(dep_dir, 'pbinstall')
pb_bin = os.path.join(pb_install, 'bin', 'protoc')
pb_lib = os.path.join(pb_install, 'lib', 'libprotobuf.so')
pb_include = os.path.join(pb_install, 'include')
os.system('rm -rf build/CMakeCache.txt')
cmd = 'cd build && cmake ..'
cmd += ' -DMMDEPLOY_BUILD_SDK=ON '
cmd += ' -DMMDEPLOY_BUILD_EXAMPLES=ON '
cmd += ' -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON '
cmd += ' -DMMDEPLOY_TARGET_DEVICES=cpu '
cmd += ' -DMMDEPLOY_TARGET_BACKENDS=ncnn '
cmd += ' -DProtobuf_PROTOC_EXECUTABLE={} '.format(pb_bin)
cmd += ' -DProtobuf_LIBRARIES={} '.format(pb_lib)
cmd += ' -DProtobuf_INCLUDE_DIR={} '.format(pb_include)
cmd += ' -Dncnn_DIR={} '.format(ncnn_cmake_dir)
os.system(cmd)
os.system('cd build && make -j {} && make install'.format(g_jobs))
os.system('python3 -m pip install -v -e . --user --no-cache-dir')
os.system(""" echo 'export PATH={}:$PATH' >> ~/mmdeploy.env """.format(
os.path.join(work_dir, 'mmdeploy', 'backend', 'ncnn')))
try:
import mmcv
print(mmcv.__version__)
os.system('python3 tools/check_env.py')
except Exception:
print('Please install torch & mmcv later.. ╮(╯▽╰)╭')
return 0 | null |
188,912 | import argparse
import os
import sys
from distutils.util import get_platform
import yaml
def parse_arguments():
parser = argparse.ArgumentParser(
description='MMDeploy create build config')
parser.add_argument(
'--backend',
required=True,
type=str,
help='target backend. Eg: "ort;trt"')
parser.add_argument(
'--system',
required=True,
type=str,
help='target system, Eg: windows/linux/jetson')
parser.add_argument(
'--build-mmdeploy',
action='store_true',
help='whether build mmdeploy runtime package')
parser.add_argument(
'--build-sdk', action='store_true', help='whether build sdk c/cpp api')
parser.add_argument(
'--sdk-dynamic-net',
action='store_true',
help='whether build mmdeploy sdk dynamic net')
parser.add_argument('--device', type=str, help='target device. Eg: "cpu"')
parser.add_argument(
'--shared', action='store_true', help='whether build shared lib')
parser.add_argument(
'--build-sdk-monolithic',
action='store_true',
help='whether build sdk monolithic')
parser.add_argument(
'--build-sdk-python',
action='store_true',
help='whether build sdk python api')
parser.add_argument(
'--opencv-dir',
type=str,
help='opencv path that contains OpenCVConfig.cmake, '
'default use $ENV{OpenCV_DIR}')
parser.add_argument(
'--pplcv-dir',
type=str,
help='pplcv path that contains pplcv-config.cmake, '
'default use $ENV{pplcv_DIR}')
parser.add_argument(
'--onnxruntime-dir',
type=str,
help='onnxruntime root path, default use $ENV{ONNXRUNTIME_DIR}')
parser.add_argument(
'--tensorrt-dir',
type=str,
help='tensorrt root path, default use $ENV{TENSORRT_DIR}')
parser.add_argument(
'--cudnn-dir',
type=str,
help='cudnn root dir, default use $ENV{CUDNN_DIR}')
parser.add_argument('--cxx11abi', action='store_true', help='new cxxabi')
parser.add_argument(
'--output', required=True, type=str, help='output config file path')
return parser.parse_args() | null |
188,913 | import argparse
import os
import sys
from distutils.util import get_platform
import yaml
def generate_config(args):
config = {}
cmake_cfg = {}
# wheel platform tag
if args.system in ['linux']:
config['PLATFORM_TAG'] = 'manylinux2014_x86_64'
elif args.system in ['jetson']:
config['PLATFORM_TAG'] = 'any'
else:
config['PLATFORM_TAG'] = get_platform().replace('-',
'_').replace('.', '_')
config['BUILD_MMDEPLOY'] = 'ON' if args.build_mmdeploy else 'OFF'
# deps for mmdeploy
cmake_cfg['MMDEPLOY_TARGET_BACKENDS'] = args.backend
if 'ort' in args.backend:
if args.onnxruntime_dir:
cmake_cfg['ONNXRUNTIME_DIR'] = args.onnxruntime_dir
elif 'ONNXRUNTIME_DIR' in os.environ:
cmake_cfg['ONNXRUNTIME_DIR'] = os.environ['ONNXRUNTIME_DIR']
else:
raise Exception('please provide --onnxruntime-dir')
if 'trt' in args.backend:
if args.tensorrt_dir:
cmake_cfg['TENSORRT_DIR'] = args.tensorrt_dir
elif 'TENSORRT_DIR' in os.environ:
cmake_cfg['TENSORRT_DIR'] = os.environ['TENSORRT_DIR']
else:
raise Exception('please provide --tensorrt-dir')
if args.cudnn_dir:
cmake_cfg['CUDNN_DIR'] = args.cudnn_dir
elif 'CUDNN_DIR' in os.environ:
cmake_cfg['CUDNN_DIR'] = os.environ['CUDNN_DIR']
else:
raise Exception('please provide --cudnn-dir')
# deps for mmdeploy-python
if args.build_sdk:
cmake_cfg['MMDEPLOY_BUILD_SDK'] = 'ON'
cmake_cfg[
'MMDEPLOY_BUILD_SDK_MONOLITHIC'] = 'ON' \
if args.build_sdk_monolithic else 'OFF'
cmake_cfg[
'MMDEPLOY_BUILD_SDK_PYTHON_API'] = 'ON' \
if args.build_sdk_python else 'OFF'
cmake_cfg['MMDEPLOY_SHARED_LIBS'] = 'ON' if args.shared else 'OFF'
cmake_cfg['MMDEPLOY_TARGET_DEVICES'] = args.device
cmake_cfg[
'MMDEPLOY_DYNAMIC_BACKEND'] = 'ON' \
if args.sdk_dynamic_net else 'OFF'
cmake_cfg['MMDEPLOY_ZIP_MODEL'] = 'ON'
if args.opencv_dir:
cmake_cfg['OpenCV_DIR'] = args.opencv_dir
elif 'OpenCV_DIR' in os.environ:
cmake_cfg['OpenCV_DIR'] = os.environ['OpenCV_DIR']
else:
raise Exception('please provide --opencv-dir')
if args.device == 'cuda':
if args.pplcv_dir:
cmake_cfg['pplcv_DIR'] = args.pplcv_dir
elif 'pplcv_DIR' in os.environ:
cmake_cfg['pplcv_DIR'] = os.environ['pplcv_DIR']
else:
raise Exception('please provide --pplcv-dir')
# sdk package template
if args.system in ['windows', 'linux']:
name = 'mmdeploy-{mmdeploy_v}-{system}-{machine}'
if args.cxx11abi:
name = name + '-cxx11abi'
if args.device == 'cpu':
pass
elif args.device == 'cuda':
name = '{}-cuda'.format(name) + '{cuda_v}'
else:
raise Exception('unsupported device')
config['BUILD_SDK_NAME'] = name
elif args.system == 'jetson':
config['BUILD_SDK_NAME'] = 'mmdeploy-{mmdeploy_v}-jetson-{machine}'
else:
raise Exception('unsupported system')
else:
cmake_cfg['MMDEPLOY_BUILD_SDK'] = 'OFF'
cmake_cfg['MMDEPLOY_BUILD_SDK_PYTHON_API'] = 'OFF'
config['cmake_cfg'] = cmake_cfg
return config | null |
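The two steps are normally chained and the resulting dict dumped to the `--output` path; a sketch of the expected main flow (reusing the `parse_arguments` helper shown above, not the verbatim tool code):
if __name__ == '__main__':
    args = parse_arguments()
    config = generate_config(args)
    with open(args.output, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)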
188,914 | import argparse
import copy
import logging
import os
import os.path as osp
import platform
import re
import shutil
import sys
from glob import glob
from subprocess import check_output, run
from typing import Dict
import yaml
from packaging import version
def create_mmdeploy(cfg: Dict, work_dir: str):
def create_mmdeploy_runtime(cfg: Dict, work_dir: str):
def create_sdk(cfg: Dict, work_dir: str):
def create_package(cfg: Dict, work_dir: str):
create_mmdeploy(cfg, work_dir)
create_sdk(cfg, work_dir)
create_mmdeploy_runtime(cfg, work_dir) | null |
188,915 | import argparse
import copy
import logging
import os
import os.path as osp
import platform
import re
import shutil
import sys
from glob import glob
from subprocess import check_output, run
from typing import Dict
import yaml
from packaging import version
def parse_args():
parser = argparse.ArgumentParser(description='Build mmdeploy from yaml.')
parser.add_argument('--config', help='The build config yaml file.')
parser.add_argument(
'--output-dir', default='.', help='Output package directory.')
args = parser.parse_args()
return args | null |
188,916 | import argparse
import copy
import logging
import os
import os.path as osp
import platform
import re
import shutil
import sys
from glob import glob
from subprocess import check_output, run
from typing import Dict
import yaml
from packaging import version
def parse_configs(cfg_path: str):
with open(cfg_path, mode='r') as f:
config = yaml.load(f, yaml.Loader)
logging.info(f'Load config\n{yaml.dump(config)}')
return config | null |
188,917 | import os
import os.path as osp
import platform
import sys
CURDIR = osp.dirname(osp.abspath(__file__))  # assumed definition; not included in this snippet
version_file = osp.join(CURDIR, 'mmdeploy_runtime', 'version.py')
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__'] | null |
188,918 | import os
import os.path as osp
import platform
import sys
def get_platform_name():
return platform.machine() | null |
188,919 | import os
import os.path as osp
import platform
import sys
def parse_arg_remove_boolean(argv, arg_name):
arg_value = False
if arg_name in sys.argv:
arg_value = True
argv.remove(arg_name)
return arg_value | null |
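Behaviour sketch with a made-up flag: the helper reports the flag as present and strips it so setuptools never sees it:
import sys
sys.argv = ['setup.py', 'bdist_wheel', '--strip-symbols']
print(parse_arg_remove_boolean(sys.argv, '--strip-symbols'))  # True
print(sys.argv)  # ['setup.py', 'bdist_wheel']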
188,920 | import config
import tiktoken
import openai
openai.api_key = config.openai_api_key
async def is_content_acceptable(prompt):
r = await openai.Moderation.acreate(input=prompt)
return not all(r.results[0].categories.values()) | null |
188,921 | import io
import logging
import asyncio
import traceback
import html
import json
from datetime import datetime
import openai
import telegram
from telegram import (
Update,
User,
InlineKeyboardButton,
InlineKeyboardMarkup,
BotCommand
)
from telegram.ext import (
Application,
ApplicationBuilder,
CallbackContext,
CommandHandler,
MessageHandler,
CallbackQueryHandler,
AIORateLimiter,
filters
)
from telegram.constants import ParseMode, ChatAction
import config
import database
import openai_utils
async def start_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update, context, update.message.from_user)
user_id = update.message.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
db.start_new_dialog(user_id)
reply_text = "Hi! I'm <b>ChatGPT</b> bot implemented with OpenAI API 🤖\n\n"
reply_text += HELP_MESSAGE
await update.message.reply_text(reply_text, parse_mode=ParseMode.HTML)
await show_chat_modes_handle(update, context)
async def help_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update, context, update.message.from_user)
user_id = update.message.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
await update.message.reply_text(HELP_MESSAGE, parse_mode=ParseMode.HTML)
async def help_group_chat_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update, context, update.message.from_user)
user_id = update.message.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
text = HELP_GROUP_CHAT_MESSAGE.format(bot_username="@" + context.bot.username)
await update.message.reply_text(text, parse_mode=ParseMode.HTML)
await update.message.reply_video(config.help_group_chat_video_path)
async def retry_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update, context, update.message.from_user)
if await is_previous_message_not_answered_yet(update, context): return
user_id = update.message.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
dialog_messages = db.get_dialog_messages(user_id, dialog_id=None)
if len(dialog_messages) == 0:
await update.message.reply_text("No message to retry 🤷♂️")
return
last_dialog_message = dialog_messages.pop()
db.set_dialog_messages(user_id, dialog_messages, dialog_id=None) # last message was removed from the context
await message_handle(update, context, message=last_dialog_message["user"], use_new_dialog_timeout=False)
async def message_handle(update: Update, context: CallbackContext, message=None, use_new_dialog_timeout=True):
# check if bot was mentioned (for group chats)
if not await is_bot_mentioned(update, context):
return
# check if message is edited
if update.edited_message is not None:
await edited_message_handle(update, context)
return
_message = message or update.message.text
# remove bot mention (in group chats)
if update.message.chat.type != "private":
_message = _message.replace("@" + context.bot.username, "").strip()
await register_user_if_not_exists(update, context, update.message.from_user)
if await is_previous_message_not_answered_yet(update, context): return
user_id = update.message.from_user.id
chat_mode = db.get_user_attribute(user_id, "current_chat_mode")
if chat_mode == "artist":
await generate_image_handle(update, context, message=message)
return
async def message_handle_fn():
# new dialog timeout
if use_new_dialog_timeout:
if (datetime.now() - db.get_user_attribute(user_id, "last_interaction")).seconds > config.new_dialog_timeout and len(db.get_dialog_messages(user_id)) > 0:
db.start_new_dialog(user_id)
await update.message.reply_text(f"Starting new dialog due to timeout (<b>{config.chat_modes[chat_mode]['name']}</b> mode) ✅", parse_mode=ParseMode.HTML)
db.set_user_attribute(user_id, "last_interaction", datetime.now())
# in case of CancelledError
n_input_tokens, n_output_tokens = 0, 0
current_model = db.get_user_attribute(user_id, "current_model")
try:
# send placeholder message to user
placeholder_message = await update.message.reply_text("...")
# send typing action
await update.message.chat.send_action(action="typing")
if _message is None or len(_message) == 0:
await update.message.reply_text("🥲 You sent <b>empty message</b>. Please, try again!", parse_mode=ParseMode.HTML)
return
dialog_messages = db.get_dialog_messages(user_id, dialog_id=None)
parse_mode = {
"html": ParseMode.HTML,
"markdown": ParseMode.MARKDOWN
}[config.chat_modes[chat_mode]["parse_mode"]]
chatgpt_instance = openai_utils.ChatGPT(model=current_model)
if config.enable_message_streaming:
gen = chatgpt_instance.send_message_stream(_message, dialog_messages=dialog_messages, chat_mode=chat_mode)
else:
answer, (n_input_tokens, n_output_tokens), n_first_dialog_messages_removed = await chatgpt_instance.send_message(
_message,
dialog_messages=dialog_messages,
chat_mode=chat_mode
)
async def fake_gen():
yield "finished", answer, (n_input_tokens, n_output_tokens), n_first_dialog_messages_removed
gen = fake_gen()
prev_answer = ""
async for gen_item in gen:
status, answer, (n_input_tokens, n_output_tokens), n_first_dialog_messages_removed = gen_item
answer = answer[:4096] # telegram message limit
# update only when 100 new symbols are ready
if abs(len(answer) - len(prev_answer)) < 100 and status != "finished":
continue
try:
await context.bot.edit_message_text(answer, chat_id=placeholder_message.chat_id, message_id=placeholder_message.message_id, parse_mode=parse_mode)
except telegram.error.BadRequest as e:
if str(e).startswith("Message is not modified"):
continue
else:
await context.bot.edit_message_text(answer, chat_id=placeholder_message.chat_id, message_id=placeholder_message.message_id)
await asyncio.sleep(0.01) # wait a bit to avoid flooding
prev_answer = answer
# update user data
new_dialog_message = {"user": _message, "bot": answer, "date": datetime.now()}
db.set_dialog_messages(
user_id,
db.get_dialog_messages(user_id, dialog_id=None) + [new_dialog_message],
dialog_id=None
)
db.update_n_used_tokens(user_id, current_model, n_input_tokens, n_output_tokens)
except asyncio.CancelledError:
# note: intermediate token updates only work when enable_message_streaming=True (config.yml)
db.update_n_used_tokens(user_id, current_model, n_input_tokens, n_output_tokens)
raise
except Exception as e:
error_text = f"Something went wrong during completion. Reason: {e}"
logger.error(error_text)
await update.message.reply_text(error_text)
return
# send message if some messages were removed from the context
if n_first_dialog_messages_removed > 0:
if n_first_dialog_messages_removed == 1:
text = "✍️ <i>Note:</i> Your current dialog is too long, so your <b>first message</b> was removed from the context.\n Send /new command to start new dialog"
else:
text = f"✍️ <i>Note:</i> Your current dialog is too long, so <b>{n_first_dialog_messages_removed} first messages</b> were removed from the context.\n Send /new command to start new dialog"
await update.message.reply_text(text, parse_mode=ParseMode.HTML)
async with user_semaphores[user_id]:
task = asyncio.create_task(message_handle_fn())
user_tasks[user_id] = task
try:
await task
except asyncio.CancelledError:
await update.message.reply_text("✅ Canceled", parse_mode=ParseMode.HTML)
else:
pass
finally:
if user_id in user_tasks:
del user_tasks[user_id]
async def voice_message_handle(update: Update, context: CallbackContext):
# check if bot was mentioned (for group chats)
if not await is_bot_mentioned(update, context):
return
await register_user_if_not_exists(update, context, update.message.from_user)
if await is_previous_message_not_answered_yet(update, context): return
user_id = update.message.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
voice = update.message.voice
voice_file = await context.bot.get_file(voice.file_id)
# store file in memory, not on disk
buf = io.BytesIO()
await voice_file.download_to_memory(buf)
buf.name = "voice.oga" # file extension is required
buf.seek(0) # move cursor to the beginning of the buffer
transcribed_text = await openai_utils.transcribe_audio(buf)
text = f"🎤: <i>{transcribed_text}</i>"
await update.message.reply_text(text, parse_mode=ParseMode.HTML)
# update n_transcribed_seconds
db.set_user_attribute(user_id, "n_transcribed_seconds", voice.duration + db.get_user_attribute(user_id, "n_transcribed_seconds"))
await message_handle(update, context, message=transcribed_text)
async def new_dialog_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update, context, update.message.from_user)
if await is_previous_message_not_answered_yet(update, context): return
user_id = update.message.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
db.start_new_dialog(user_id)
await update.message.reply_text("Starting new dialog ✅")
chat_mode = db.get_user_attribute(user_id, "current_chat_mode")
await update.message.reply_text(f"{config.chat_modes[chat_mode]['welcome_message']}", parse_mode=ParseMode.HTML)
async def cancel_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update, context, update.message.from_user)
user_id = update.message.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
if user_id in user_tasks:
task = user_tasks[user_id]
task.cancel()
else:
await update.message.reply_text("<i>Nothing to cancel...</i>", parse_mode=ParseMode.HTML)
async def show_chat_modes_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update, context, update.message.from_user)
if await is_previous_message_not_answered_yet(update, context): return
user_id = update.message.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
text, reply_markup = get_chat_mode_menu(0)
await update.message.reply_text(text, reply_markup=reply_markup, parse_mode=ParseMode.HTML)
async def show_chat_modes_callback_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update.callback_query, context, update.callback_query.from_user)
if await is_previous_message_not_answered_yet(update.callback_query, context): return
user_id = update.callback_query.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
query = update.callback_query
await query.answer()
page_index = int(query.data.split("|")[1])
if page_index < 0:
return
text, reply_markup = get_chat_mode_menu(page_index)
try:
await query.edit_message_text(text, reply_markup=reply_markup, parse_mode=ParseMode.HTML)
except telegram.error.BadRequest as e:
if str(e).startswith("Message is not modified"):
pass
async def set_chat_mode_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update.callback_query, context, update.callback_query.from_user)
user_id = update.callback_query.from_user.id
query = update.callback_query
await query.answer()
chat_mode = query.data.split("|")[1]
db.set_user_attribute(user_id, "current_chat_mode", chat_mode)
db.start_new_dialog(user_id)
await context.bot.send_message(
update.callback_query.message.chat.id,
f"{config.chat_modes[chat_mode]['welcome_message']}",
parse_mode=ParseMode.HTML
)
async def settings_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update, context, update.message.from_user)
if await is_previous_message_not_answered_yet(update, context): return
user_id = update.message.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
text, reply_markup = get_settings_menu(user_id)
await update.message.reply_text(text, reply_markup=reply_markup, parse_mode=ParseMode.HTML)
async def set_settings_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update.callback_query, context, update.callback_query.from_user)
user_id = update.callback_query.from_user.id
query = update.callback_query
await query.answer()
_, model_key = query.data.split("|")
db.set_user_attribute(user_id, "current_model", model_key)
db.start_new_dialog(user_id)
text, reply_markup = get_settings_menu(user_id)
try:
await query.edit_message_text(text, reply_markup=reply_markup, parse_mode=ParseMode.HTML)
except telegram.error.BadRequest as e:
if str(e).startswith("Message is not modified"):
pass
async def show_balance_handle(update: Update, context: CallbackContext):
await register_user_if_not_exists(update, context, update.message.from_user)
user_id = update.message.from_user.id
db.set_user_attribute(user_id, "last_interaction", datetime.now())
# count total usage statistics
total_n_spent_dollars = 0
total_n_used_tokens = 0
n_used_tokens_dict = db.get_user_attribute(user_id, "n_used_tokens")
n_generated_images = db.get_user_attribute(user_id, "n_generated_images")
n_transcribed_seconds = db.get_user_attribute(user_id, "n_transcribed_seconds")
details_text = "🏷️ Details:\n"
for model_key in sorted(n_used_tokens_dict.keys()):
n_input_tokens, n_output_tokens = n_used_tokens_dict[model_key]["n_input_tokens"], n_used_tokens_dict[model_key]["n_output_tokens"]
total_n_used_tokens += n_input_tokens + n_output_tokens
n_input_spent_dollars = config.models["info"][model_key]["price_per_1000_input_tokens"] * (n_input_tokens / 1000)
n_output_spent_dollars = config.models["info"][model_key]["price_per_1000_output_tokens"] * (n_output_tokens / 1000)
total_n_spent_dollars += n_input_spent_dollars + n_output_spent_dollars
details_text += f"- {model_key}: <b>{n_input_spent_dollars + n_output_spent_dollars:.03f}$</b> / <b>{n_input_tokens + n_output_tokens} tokens</b>\n"
# image generation
image_generation_n_spent_dollars = config.models["info"]["dalle-2"]["price_per_1_image"] * n_generated_images
if n_generated_images != 0:
details_text += f"- DALL·E 2 (image generation): <b>{image_generation_n_spent_dollars:.03f}$</b> / <b>{n_generated_images} generated images</b>\n"
total_n_spent_dollars += image_generation_n_spent_dollars
# voice recognition
voice_recognition_n_spent_dollars = config.models["info"]["whisper"]["price_per_1_min"] * (n_transcribed_seconds / 60)
if n_transcribed_seconds != 0:
details_text += f"- Whisper (voice recognition): <b>{voice_recognition_n_spent_dollars:.03f}$</b> / <b>{n_transcribed_seconds:.01f} seconds</b>\n"
total_n_spent_dollars += voice_recognition_n_spent_dollars
text = f"You spent <b>{total_n_spent_dollars:.03f}$</b>\n"
text += f"You used <b>{total_n_used_tokens}</b> tokens\n\n"
text += details_text
await update.message.reply_text(text, parse_mode=ParseMode.HTML)
async def error_handle(update: Update, context: CallbackContext) -> None:
logger.error(msg="Exception while handling an update:", exc_info=context.error)
try:
# collect error message
tb_list = traceback.format_exception(None, context.error, context.error.__traceback__)
tb_string = "".join(tb_list)
update_str = update.to_dict() if isinstance(update, Update) else str(update)
message = (
f"An exception was raised while handling an update\n"
f"<pre>update = {html.escape(json.dumps(update_str, indent=2, ensure_ascii=False))}"
"</pre>\n\n"
f"<pre>{html.escape(tb_string)}</pre>"
)
# split text into multiple messages due to 4096 character limit
for message_chunk in split_text_into_chunks(message, 4096):
try:
await context.bot.send_message(update.effective_chat.id, message_chunk, parse_mode=ParseMode.HTML)
except telegram.error.BadRequest:
# answer has invalid characters, so we send it without parse_mode
await context.bot.send_message(update.effective_chat.id, message_chunk)
    except Exception:
await context.bot.send_message(update.effective_chat.id, "Some error in error handler")
async def post_init(application: Application):
await application.bot.set_my_commands([
BotCommand("/new", "Start new dialog"),
BotCommand("/mode", "Select chat mode"),
BotCommand("/retry", "Re-generate response for previous query"),
BotCommand("/balance", "Show balance"),
BotCommand("/settings", "Show settings"),
BotCommand("/help", "Show help message"),
])
def run_bot() -> None:
application = (
ApplicationBuilder()
.token(config.telegram_token)
.concurrent_updates(True)
.rate_limiter(AIORateLimiter(max_retries=5))
.http_version("1.1")
.get_updates_http_version("1.1")
.post_init(post_init)
.build()
)
# add handlers
user_filter = filters.ALL
if len(config.allowed_telegram_usernames) > 0:
usernames = [x for x in config.allowed_telegram_usernames if isinstance(x, str)]
any_ids = [x for x in config.allowed_telegram_usernames if isinstance(x, int)]
user_ids = [x for x in any_ids if x > 0]
group_ids = [x for x in any_ids if x < 0]
user_filter = filters.User(username=usernames) | filters.User(user_id=user_ids) | filters.Chat(chat_id=group_ids)
application.add_handler(CommandHandler("start", start_handle, filters=user_filter))
application.add_handler(CommandHandler("help", help_handle, filters=user_filter))
application.add_handler(CommandHandler("help_group_chat", help_group_chat_handle, filters=user_filter))
application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND & user_filter, message_handle))
application.add_handler(CommandHandler("retry", retry_handle, filters=user_filter))
application.add_handler(CommandHandler("new", new_dialog_handle, filters=user_filter))
application.add_handler(CommandHandler("cancel", cancel_handle, filters=user_filter))
application.add_handler(MessageHandler(filters.VOICE & user_filter, voice_message_handle))
application.add_handler(CommandHandler("mode", show_chat_modes_handle, filters=user_filter))
application.add_handler(CallbackQueryHandler(show_chat_modes_callback_handle, pattern="^show_chat_modes"))
application.add_handler(CallbackQueryHandler(set_chat_mode_handle, pattern="^set_chat_mode"))
application.add_handler(CommandHandler("settings", settings_handle, filters=user_filter))
application.add_handler(CallbackQueryHandler(set_settings_handle, pattern="^set_settings"))
application.add_handler(CommandHandler("balance", show_balance_handle, filters=user_filter))
application.add_error_handler(error_handle)
# start the bot
    application.run_polling()
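# Assumed entry point (a sketch only; the excerpt above ends at run_bot, so this guard is
# not part of the original snippet):
if __name__ == "__main__":
    run_bot()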
188,922
import os
from typing import Dict, List, Optional, Tuple, Union
import torch
from fastchat.conversation import (compute_skip_echo_len,
get_default_conv_template)
from fastchat.serve.inference import load_model as load_fastchat_model
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
from config import *
CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE
def torch_gc():
if torch.cuda.is_available():
with torch.cuda.device(CUDA_DEVICE):
torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
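# Minimal usage sketch (assumption, not from the original snippet): run an arbitrary model
# call and always hand cached CUDA blocks back to the allocator afterwards via torch_gc().
def _call_and_release(fn, *args, **kwargs):
    try:
        return fn(*args, **kwargs)
    finally:
        torch_gc()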
188,923
import os
from typing import Dict, List, Optional, Tuple, Union
import torch
from fastchat.conversation import (compute_skip_echo_len,
get_default_conv_template)
from fastchat.serve.inference import load_model as load_fastchat_model
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
from config import *
def auto_configure_device_map(num_gpus: int) -> Dict[str, int]:
    # ChatGLM-6B has 28 transformer layers; word_embeddings and the final_layernorm/lm_head
    # pair each count as roughly one extra layer, so the memory budget is split as 30 slots.
    num_trans_layers = 28
    per_gpu_layers = 30 / num_gpus

    # The embedding and output modules stay on GPU 0.
    device_map = {
        'transformer.word_embeddings': 0,
        'transformer.final_layernorm': 0,
        'lm_head': 0
    }

    used = 2  # GPU 0 already holds the two extra slots above
    gpu_target = 0
for i in range(num_trans_layers):
if used >= per_gpu_layers:
gpu_target += 1
used = 0
assert gpu_target < num_gpus
device_map[f'transformer.layers.{i}'] = gpu_target
used += 1
    return device_map
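# Usage sketch (assumption; the model name and the `accelerate` dependency are illustrative,
# not part of the original snippet): spread a ChatGLM-style model over several GPUs with the
# map built above.
def load_model_on_gpus(model_name: str, num_gpus: int):
    from accelerate import dispatch_model  # optional dependency, imported lazily
    model = AutoModel.from_pretrained(model_name, trust_remote_code=True).half()
    device_map = auto_configure_device_map(num_gpus)
    return dispatch_model(model, device_map=device_map)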
188,924
import os
import gradio as gr
import nltk
import torch
from chatglm_llm import ChatGLM
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS
from modelscope_hub import ModelScopeEmbeddings
def clear_session():
    return '', None
188,925
import os
import gradio as gr
import nltk
import torch
from chatglm_llm import ChatGLM
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS
from modelscope_hub import ModelScopeEmbeddings
def search_web(query):
    ...  # body elided in this snippet


def init_knowledge_vector_store(embedding_model, filepath):
    ...  # body elided in this snippet

def get_knowledge_based_answer(
query,
large_language_model,
vector_store,
VECTOR_SEARCH_TOP_K,
web_content,
chat_history=[],
history_len=3,
temperature=0.01,
top_p=0.9,
):
    ...  # body elided in this snippet

def predict(input,
large_language_model,
embedding_model,
file_obj,
VECTOR_SEARCH_TOP_K,
history_len,
temperature,
top_p,
use_web,
history=None):
    if history is None:
history = []
print(file_obj.name)
if use_web == 'True':
web_content = search_web(query=input)
else:
web_content = ''
vector_store = init_knowledge_vector_store(embedding_model, file_obj.name)
resp = get_knowledge_based_answer(
query=input,
large_language_model=large_language_model,
vector_store=vector_store,
VECTOR_SEARCH_TOP_K=VECTOR_SEARCH_TOP_K,
web_content=web_content,
chat_history=history,
history_len=history_len,
temperature=temperature,
top_p=top_p,
)
print(resp)
history.append((input, resp))
    return '', history, history
188,926
import os
from typing import List, Optional
import torch
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from config import *  # assumption: DEVICE and DEVICE_ID come from config, as in the ChatGLM snippet above
CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE
def torch_gc():
if torch.cuda.is_available():
with torch.cuda.device(CUDA_DEVICE):
torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
188,927
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain_community.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
import torch
def update_status(history, status):
history = history + [[None, status]]
print(status)
    return history
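# Usage sketch (assumption, not from the original snippet): append a progress line to a
# Gradio chatbot history so the user sees build status while the vector store is prepared.
if __name__ == "__main__":
    demo_history = update_status([], "Vector store built, you can start asking questions")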
188,928
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain_community.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
import torch
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def init_model():
try:
print("开始加载模型配置")
knowladge_based_chat_llm.init_model_config()
print("模型配置加载成功")
knowladge_based_chat_llm.llm._call("你好")
return """初始模型已成功加载,可以开始对话"""
except Exception as e:
print(f"加载模型出错: {e}") # 打印详细的异常信息
return """模型未成功加载,请重新选择模型后点击"重新加载模型"按钮""" | null |
188,929
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain_community.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
import torch
def clear_session():
    return '', None
188,930
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain_community.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
import torch
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
model_status = init_model()
def reinit_model(large_language_model, embedding_model, history):
try:
knowladge_based_chat_llm.init_model_config(
large_language_model=large_language_model,
embedding_model=embedding_model)
model_status = """模型已成功重新加载,可以开始对话"""
except Exception as e:
model_status = """模型未成功重新加载,请点击重新加载模型"""
return history + [[None, model_status]] | null |
188,931
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain_community.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
import torch
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def init_vector_store(file_obj):
vector_store = knowladge_based_chat_llm.init_knowledge_vector_store(
file_obj.name)
    return vector_store
188,932
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain_community.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
import torch
def search_web(query):
SESSION.proxies = {
"http": f"socks5h://localhost:7890",
"https": f"socks5h://localhost:7890"
}
results = ddg(query)
web_content = ''
if results:
for result in results:
web_content += result['body']
return web_content
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def predict(input,
use_web,
top_k,
history_len,
temperature,
top_p,
history=None):
    if history is None:
history = []
if use_web == 'True':
web_content = search_web(query=input)
else:
web_content = ''
resp = knowladge_based_chat_llm.get_knowledge_based_answer(
query=input,
web_content=web_content,
top_k=top_k,
history_len=history_len,
temperature=temperature,
top_p=top_p,
history=history)
history.append((input, resp['result']))
    return '', history, history
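# Minimal Gradio wiring sketch (assumption; the original app builds a richer gr.Blocks UI
# with model and embedding selectors, so the components below are only illustrative):
def build_demo():
    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        state = gr.State([])
        message = gr.Textbox(label="Question")
        use_web = gr.Radio(["True", "False"], value="False", label="Use web search")
        top_k = gr.Slider(1, 10, value=6, step=1, label="Vector search top k")
        history_len = gr.Slider(0, 5, value=3, step=1, label="History length")
        temperature = gr.Slider(0.01, 1.0, value=0.01, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, value=0.9, label="Top-p")
        message.submit(predict,
                       inputs=[message, use_web, top_k, history_len, temperature, top_p, state],
                       outputs=[message, chatbot, state])
    return demo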
188,933
import os
import gradio as gr
import nltk
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import FAISS
from paddle_embedding import PaddleNLPEmbeddings
from chatllm import ChatLLM
def clear_session():
return '', None | null |