id | prompt | docstring |
---|---|---|
188,734 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import IR
The provided code snippet includes necessary dependencies for implementing the `abi_language_decoder___get_length__default` function. Write a Python function `def abi_language_decoder___get_length__default(self, logit: torch.Tensor, dim: int = -1, **kwargs) -> torch.Tensor` to solve the following problem:
Rewrite `_get_length`. Add `.float()` to cast Tensors from bool to float for `cumsum` and `argmax`. Returns the first location of padding index or the length of the entire tensor otherwise.
Here is the function:
def abi_language_decoder___get_length__default(self,
logit: torch.Tensor,
dim: int = -1,
**kwargs) -> torch.Tensor:
"""Rewrite `_get_length`. Add `.float()` to cast Tensors from bool to float
for `cumsum` and `argmax`.
Returns the first location of padding index or the length of the entire
tensor otherwise.
"""
# out as a boolean vector indicating the existence of end token(s)
out = (logit.argmax(dim=-1) == self.dictionary.end_idx)
abn = out.any(dim)
# Get the first index of end token
# add `.float()` to `out` for onnxruntime `cumsum()`
# add `.float()` before `argmax()`
out = ((out.float().cumsum(dim) == 1) & out).float().argmax(dim)
out = out + 1
out = torch.where(abn, out,
out.new_tensor(logit.shape[1]).to(out.device)).float()
return out | Rewrite `_get_length`. Add `.float()` to cast Tensors from bool to float for `cumsum` and `argmax`. Returns the first location of padding index or the length of the entire tensor otherwise. |
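The cast-then-locate trick above is easy to check in isolation; a minimal sketch in plain PyTorch (no mmdeploy required), with a hypothetical mask value:
import torch
# Hypothetical mask; in the rewrite it comes from comparing argmax output
# with the end-token index.
out = torch.tensor([[False, True, False, True]])
# `.float()` is needed because ONNX Runtime's `cumsum`/`argmax` reject bool.
first_true = ((out.float().cumsum(-1) == 1) & out).float().argmax(-1)
print(first_true)  # tensor([1]) -- index of the first True entry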
188,735 | from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `crnndecoder__forward_train__ncnn` function. Write a Python function `def crnndecoder__forward_train__ncnn(self, feat, *args, **kwargs)` to solve the following problem:
Rewrite `forward_train` of CRNNDecoder for ncnn backend. Rewrite this function to skip permuting dims of outputs from `[W, N, C]` to `[N, W, C]`
Here is the function:
def crnndecoder__forward_train__ncnn(self, feat, *args, **kwargs):
"""Rewrite `forward_train` of CRNNDecoder for ncnn backend.
Rewrite this function to skip permuting dims of outputs from `[W, N, C]` to
`[N, W, C]`
"""
assert feat.size(2) == 1, 'feature height must be 1'
if self.rnn_flag:
x = feat.squeeze(2) # [N, C, W]
x = x.permute(0, 2, 1) # [N, W, C]
outputs = self.decoder(x)
else:
x = self.decoder(feat)
x = x.permute(0, 3, 1, 2).contiguous()
n, w, c, h = x.size()
outputs = x.view(n, w, c * h)
return outputs | Rewrite `forward_train` of CRNNDecoder for ncnn backend. Rewrite this function to skip permuting dims of outputs from `[W, N, C]` to `[N, W, C]` |
188,736 | import copy
from typing import Optional, Sequence
import torch
import torch.nn.functional as F
from mmocr.utils.typing_utils import TextRecogDataSample
from torch import nn
from mmdeploy.core import FUNCTION_REWRITER, MODULE_REWRITER
The provided code snippet includes necessary dependencies for implementing the `parallel_sar_decoder__2d_attention` function. Write a Python function `def parallel_sar_decoder__2d_attention( self, decoder_input: torch.Tensor, feat: torch.Tensor, holistic_feat: torch.Tensor, valid_ratios: Optional[Sequence[float]] = None) -> torch.Tensor` to solve the following problem:
Rewrite `_2d_attention` of ParallelSARDecoder for default backend. Rewrite this function to: 1. use torch.ceil to replace original math.ceil and if else in mmocr. 2. use narrow to replace original [valid_width:] in mmocr
Here is the function:
def parallel_sar_decoder__2d_attention(
self,
decoder_input: torch.Tensor,
feat: torch.Tensor,
holistic_feat: torch.Tensor,
valid_ratios: Optional[Sequence[float]] = None) -> torch.Tensor:
"""Rewrite `_2d_attention` of ParallelSARDecoder for default backend.
Rewrite this function to:
1. use torch.ceil to replace original math.ceil and if else in mmocr.
2. use narrow to replace original [valid_width:] in mmocr
"""
y = self.rnn_decoder(decoder_input)[0]
# y: bsz * (seq_len + 1) * hidden_size
attn_query = self.conv1x1_1(y) # bsz * (seq_len + 1) * attn_size
bsz, seq_len, attn_size = attn_query.size()
attn_query = attn_query.view(bsz, seq_len, attn_size, 1, 1)
attn_key = self.conv3x3_1(feat)
# bsz * attn_size * h * w
attn_key = attn_key.unsqueeze(1)
# bsz * 1 * attn_size * h * w
attn_weight = torch.tanh(torch.add(attn_key, attn_query, alpha=1))
# bsz * (seq_len + 1) * attn_size * h * w
attn_weight = attn_weight.permute(0, 1, 3, 4, 2).contiguous()
# bsz * (seq_len + 1) * h * w * attn_size
attn_weight = self.conv1x1_2(attn_weight)
# bsz * (seq_len + 1) * h * w * 1
bsz, T, h, w, c = attn_weight.size()
assert c == 1
if valid_ratios is not None:
# cal mask of attention weight
attn_mask = torch.zeros(bsz, T, h, w + 1, c).to(attn_weight.device)
for i, valid_ratio in enumerate(valid_ratios):
# use torch.ceil to replace original math.ceil and if else in mmocr
valid_width = torch.tensor(w * valid_ratio).ceil().long()
# use narrow to replace original [valid_width:] in mmocr
attn_mask[i].narrow(2, valid_width, w + 1 - valid_width)[:] = 1
attn_mask = attn_mask[:, :, :, :w, :]
attn_weight = attn_weight.masked_fill(attn_mask.bool(), float('-inf'))
attn_weight = attn_weight.view(bsz, T, -1)
attn_weight = F.softmax(attn_weight, dim=-1)
attn_weight = attn_weight.view(bsz, T, h, w, c).permute(0, 1, 4, 2,
3).contiguous()
attn_feat = torch.sum(
torch.mul(feat.unsqueeze(1), attn_weight), (3, 4), keepdim=False)
# bsz * (seq_len + 1) * C
# linear transformation
if self.pred_concat:
hf_c = holistic_feat.size(-1)
holistic_feat = holistic_feat.expand(bsz, seq_len, hf_c)
y = self.prediction(torch.cat((y, attn_feat, holistic_feat), 2))
else:
y = self.prediction(attn_feat)
# bsz * (seq_len + 1) * num_classes
y = self.pred_dropout(y)
return y | Rewrite `_2d_attention` of ParallelSARDecoder for default backend. Rewrite this function to: 1. use torch.ceil to replace original math.ceil and if else in mmocr. 2. use narrow to replace original [valid_width:] in mmocr |
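The `narrow` substitution used in the masking loop above can be sanity-checked on its own. A small sketch with a hypothetical `valid_width` (in the rewrite it is derived from `valid_ratio`), showing that `narrow(dim, start, length)` selects the same region as basic slicing while staying friendly to export:
import torch
x = torch.randn(2, 3, 6, 5)
valid_width = 4  # hypothetical value
a = x.narrow(2, valid_width, x.size(2) - valid_width)
b = x[:, :, valid_width:, :]
assert torch.equal(a, b)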
188,737 | import copy
from typing import Optional, Sequence
import torch
import torch.nn.functional as F
from mmocr.utils.typing_utils import TextRecogDataSample
from torch import nn
from mmdeploy.core import FUNCTION_REWRITER, MODULE_REWRITER
The provided code snippet includes necessary dependencies for implementing the `sequential_sar_decoder__2d_attention` function. Write a Python function `def sequential_sar_decoder__2d_attention(self, y_prev, feat, holistic_feat, hx1, cx1, hx2, cx2, valid_ratios=None)` to solve the following problem:
Rewrite `_2d_attention` of SequentialSARDecoder for default backend. Rewrite this function to: 1. use torch.ceil to replace original math.ceil and if else in mmocr. 2. use narrow to replace original [valid_width:] in mmocr
Here is the function:
def sequential_sar_decoder__2d_attention(self,
y_prev,
feat,
holistic_feat,
hx1,
cx1,
hx2,
cx2,
valid_ratios=None):
"""Rewrite `_2d_attention` of SequentialSARDecoder for default backend.
Rewrite this function to:
1. use torch.ceil to replace original math.ceil and if else in mmocr.
2. use narrow to replace original [valid_width:] in mmocr
"""
_, _, h_feat, w_feat = feat.size()
if self.dec_gru:
hx1 = cx1 = self.rnn_decoder_layer1(y_prev, hx1)
hx2 = cx2 = self.rnn_decoder_layer2(hx1, hx2)
else:
# LSTMCell has been replaced with LSTM, so the forward call needs a rewrite
_, (hx1,
cx1) = self.rnn_decoder_layer1(y_prev.unsqueeze(0), (hx1, cx1))
_, (hx2, cx2) = self.rnn_decoder_layer2(hx1, (hx2, cx2))
tile_hx2 = hx2.view(hx2.size(1), hx2.size(-1), 1, 1)
attn_query = self.conv1x1_1(tile_hx2) # bsz * attn_size * 1 * 1
attn_query = attn_query.expand(-1, -1, h_feat, w_feat)
attn_key = self.conv3x3_1(feat)
attn_weight = torch.tanh(torch.add(attn_key, attn_query, alpha=1))
attn_weight = self.conv1x1_2(attn_weight)
bsz, c, h, w = attn_weight.size()
assert c == 1
if valid_ratios is not None:
# cal mask of attention weight
attn_mask = torch.zeros(bsz, c, h, w + 1).to(attn_weight.device)
for i, valid_ratio in enumerate(valid_ratios):
# use torch.ceil to replace original math.ceil and if else in mmocr
valid_width = torch.tensor(w * valid_ratio).ceil().long()
# use narrow to replace original [valid_width:] in mmocr
attn_mask[i].narrow(2, valid_width, w + 1 - valid_width)[:] = 1
attn_mask = attn_mask[:, :, :, :w]
attn_weight = attn_weight.masked_fill(attn_mask.bool(), float('-inf'))
attn_weight = F.softmax(attn_weight.view(bsz, -1), dim=-1)
attn_weight = attn_weight.view(bsz, c, h, w)
attn_feat = torch.sum(
torch.mul(feat, attn_weight), (2, 3), keepdim=False) # n * c
# linear transformation
if self.pred_concat:
y = self.prediction(torch.cat((hx2[0], attn_feat, holistic_feat), 1))
else:
y = self.prediction(attn_feat)
return y, hx1, cx1, hx2, cx2 | Rewrite `_2d_attention` of SequentialSARDecoder for default backend. Rewrite this function to: 1. use torch.ceil to replace original math.ceil and if else in mmocr. 2. use narrow to replace original [valid_width:] in mmocr |
188,738 | import copy
from typing import Optional, Sequence
import torch
import torch.nn.functional as F
from mmocr.utils.typing_utils import TextRecogDataSample
from torch import nn
from mmdeploy.core import FUNCTION_REWRITER, MODULE_REWRITER
The provided code snippet includes necessary dependencies for implementing the `sequential_sar_decoder__forward_test` function. Write a Python function `def sequential_sar_decoder__forward_test( self, feat: torch.Tensor, out_enc: torch.Tensor, data_samples: Optional[Sequence[TextRecogDataSample]] = None)` to solve the following problem:
Rewrite `forward_test` of SequentialSARDecoder for default backend. Rewrite this function because LSTMCell has been replaced with LSTM. The two classes have different forward functions, so `forward_test` needs to adapt to this change.
Here is the function:
def sequential_sar_decoder__forward_test(
self,
feat: torch.Tensor,
out_enc: torch.Tensor,
data_samples: Optional[Sequence[TextRecogDataSample]] = None):
"""Rewrite `forward_test` of SequentialSARDecoder for default backend.
Rewrite this function because LSTMCell has been replaced with LSTM. The two
classes have different forward functions, so `forward_test` needs to adapt
to this change.
"""
valid_ratios = None
if data_samples is not None:
valid_ratios = [
data_sample.get('valid_ratio', 1.0) for data_sample in data_samples
] if self.mask else None
outputs = []
start_token = torch.full((feat.size(0), ),
self.start_idx,
device=feat.device,
dtype=torch.long)
start_token = self.embedding(start_token)
for i in range(-1, self.max_seq_len):
if i == -1:
if self.dec_gru:
hx1 = cx1 = self.rnn_decoder_layer1(out_enc)
hx2 = cx2 = self.rnn_decoder_layer2(hx1)
else:
# LSTMCell has been replaced with LSTM, so the forward call needs a rewrite
_, (hx1, cx1) = self.rnn_decoder_layer1(out_enc.unsqueeze(0))
_, (hx2, cx2) = self.rnn_decoder_layer2(hx1)
y_prev = start_token
else:
y, hx1, cx1, hx2, cx2 = self._2d_attention(
y_prev,
feat,
out_enc,
hx1,
cx1,
hx2,
cx2,
valid_ratios=valid_ratios)
_, max_idx = torch.max(y, dim=1, keepdim=False)
char_embedding = self.embedding(max_idx)
y_prev = char_embedding
outputs.append(y)
outputs = torch.stack(outputs, 1)
return outputs | Rewrite `forward_test` of SequentialSARDecoder for default backend. Rewrite this function because LSTMCell has been replaced with LSTM. The two classes have different forward functions, so `forward_test` needs to adapt to this change. |
188,739 | from typing import Optional, Sequence
import torch
from mmocr.structures import TextRecogDataSample
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `base_decoder__forward` function. Write a Python function `def base_decoder__forward( self, feat: Optional[torch.Tensor] = None, out_enc: Optional[torch.Tensor] = None, data_samples: Optional[Sequence[TextRecogDataSample]] = None ) -> Sequence[TextRecogDataSample]` to solve the following problem:
Rewrite `predict` of `BaseDecoder` to skip post-processing. Args: feat (Tensor, optional): Features from the backbone. Defaults to None. out_enc (Tensor, optional): Features from the encoder. Defaults to None. data_samples (list[TextRecogDataSample]): A list of N datasamples, containing meta information and gold annotations for each of the images. Defaults to None.
Here is the function:
def base_decoder__forward(
self,
feat: Optional[torch.Tensor] = None,
out_enc: Optional[torch.Tensor] = None,
data_samples: Optional[Sequence[TextRecogDataSample]] = None
) -> Sequence[TextRecogDataSample]:
"""Rewrite `predict` of `BaseDecoder` to skip post-process.
Args:
feat (Tensor, optional): Features from the backbone. Defaults
to None.
out_enc (Tensor, optional): Features from the encoder. Defaults
to None.
data_samples (list[TextRecogDataSample]): A list of N datasamples,
containing meta information and gold annotations for each of
the images. Defaults to None.
"""
out_dec = self(feat, out_enc, data_samples)
return out_dec | Rewrite `predict` of `BaseDecoder` to skip post-processing. Args: feat (Tensor, optional): Features from the backbone. Defaults to None. out_enc (Tensor, optional): Features from the encoder. Defaults to None. data_samples (list[TextRecogDataSample]): A list of N datasamples, containing meta information and gold annotations for each of the images. Defaults to None. |
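The dataset rows strip the registration decorators, but rewrites like the one above are attached through `FUNCTION_REWRITER.register_rewriter` (the FPNC entry below shows a concrete target). A sketch of the pattern; the `func_name` path here is an assumption, not a verified import path:
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmocr.models.textrecog.BaseDecoder.predict')  # assumed path
def base_decoder__forward(self, feat=None, out_enc=None, data_samples=None):
    # skip post-processing and return the raw decoder output
    return self(feat, out_enc, data_samples)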
188,740 | from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `bidirectionallstm__forward__ncnn` function. Write a Python function `def bidirectionallstm__forward__ncnn(self, input)` to solve the following problem:
Rewrite `forward` of BidirectionalLSTM for ncnn backend. Rewrite this function to set batch_first of rnn layer to true. RNN in ncnn requires batch first. Args: ctx (ContextCaller): The context with additional information. self: The instance of the class BidirectionalLSTM. input (Tensor): Input tensor of shape (N, H, W). Returns: output (Tensor): Embedded tensor from embedding layer.
Here is the function:
def bidirectionallstm__forward__ncnn(self, input):
"""Rewrite `forward` of BidirectionalLSTM for ncnn backend.
Rewrite this function to set batch_first of rnn layer to true. RNN in ncnn
requires batch first.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the class
BidirectionalLSTM.
input (Tensor): Input tensor of shape (N, H, W).
Returns:
output (Tensor): Embedded tensor from embedding layer.
"""
self.rnn.batch_first = True
recurrent, _ = self.rnn(input)
self.rnn.batch_first = False
output = self.embedding(recurrent)
return output | Rewrite `forward` of BidirectionalLSTM for ncnn backend. Rewrite this function to set batch_first of rnn layer to true. RNN in ncnn requires batch first. Args: ctx (ContextCaller): The context with additional information. self: The instance of the class BidirectionalLSTM. input (Tensor): Input tensor of shape (N, H, W). Returns: output (Tensor): Embedded tensor from embedding layer. |
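The `batch_first` toggle above works because `nn.LSTM` reads the flag at call time. A standalone sketch with hypothetical sizes, showing that flipping the flag only changes the input/output layout, not the weights:
import torch
from torch import nn
rnn = nn.LSTM(8, 16, bidirectional=True)  # constructed with batch_first=False
x = torch.randn(4, 10, 8)                 # (N, T, C), batch-first layout
rnn.batch_first = True
out, _ = rnn(x)
rnn.batch_first = False
print(out.shape)  # torch.Size([4, 10, 32])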
188,741 | from typing import Optional, Sequence
import torch
import torch.nn.functional as F
from mmocr.structures import TextRecogDataSample
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `sar_encoder__forward` function. Write a Python function `def sar_encoder__forward( self, feat: torch.Tensor, data_samples: Optional[Sequence[TextRecogDataSample]] = None)` to solve the following problem:
Rewrite `forward` of SAREncoder for default backend. Rewrite this function to: 1. convert tuple value of feat.size to int, making model exportable. 2. use torch.ceil to replace original math.ceil and if else in mmocr. Args: ctx (ContextCaller): The context with additional information. self: The instance of the class SAREncoder. feat (Tensor): Tensor of shape :math:`(N, D_i, H, W)`. data_samples (list[TextRecogDataSample], optional): Batch of TextRecogDataSample, containing valid_ratio information. Defaults to None. Returns: holistic_feat (Tensor): A feature map output from SAREncoder. The shape [N, M].
Here is the function:
def sar_encoder__forward(
self,
feat: torch.Tensor,
data_samples: Optional[Sequence[TextRecogDataSample]] = None):
"""Rewrite `forward` of SAREncoder for default backend.
Rewrite this function to:
1. convert tuple value of feat.size to int, making model exportable.
2. use torch.ceil to replace original math.ceil and if else in mmocr.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the class SAREncoder.
feat (Tensor): Tensor of shape :math:`(N, D_i, H, W)`.
data_samples (list[TextRecogDataSample], optional): Batch of
TextRecogDataSample, containing valid_ratio information.
Defaults to None.
Returns:
holistic_feat (Tensor): A feature map output from SAREncoder. The shape
[N, M].
"""
if data_samples is not None:
assert len(data_samples) == feat.size(0)
valid_ratios = None
if data_samples is not None:
valid_ratios = [
data_sample.get('valid_ratio', 1.0) for data_sample in data_samples
] if self.mask else None
h_feat = int(feat.size(2))
feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0)
feat_v = feat_v.squeeze(2) # bsz * C * W
feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W * C
holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C
if valid_ratios is not None:
valid_hf = []
T = holistic_feat.size(1)
for i, valid_ratio in enumerate(valid_ratios):
# use torch.ceil to replace original math.ceil and if else in mmocr
valid_step = torch.tensor(T * valid_ratio).ceil().long() - 1
valid_hf.append(holistic_feat[i, valid_step, :])
valid_hf = torch.stack(valid_hf, dim=0)
else:
valid_hf = holistic_feat[:, -1, :] # bsz * C
holistic_feat = self.linear(valid_hf) # bsz * C
return holistic_feat | Rewrite `forward` of SAREncoder for default backend. Rewrite this function to: 1. convert tuple value of feat.size to int, making model exportable. 2. use torch.ceil to replace original math.ceil and if else in mmocr. Args: ctx (ContextCaller): The context with additional information. self: The instance of the class SAREncoder. feat (Tensor): Tensor of shape :math:`(N, D_i, H, W)`. data_samples (list[TextRecogDataSample], optional): Batch of TextRecogDataSample, containing valid_ratio information. Defaults to None. Returns: holistic_feat (Tensor): A feature map output from SAREncoder. The shape [N, M]. |
188,742 | import math
from typing import List
from mmocr.structures import TextRecogDataSample
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `satrn_encoder__forward` function. Write a Python function `def satrn_encoder__forward( self, feat: Tensor, data_samples: List[TextRecogDataSample] = None) -> Tensor` to solve the following problem:
Forward propagation of encoder. Args: feat (Tensor): Feature tensor of shape :math:`(N, D_m, H, W)`. data_samples (list[TextRecogDataSample]): Batch of TextRecogDataSample, containing `valid_ratio` information. Defaults to None. Returns: Tensor: A tensor of shape :math:`(N, T, D_m)`.
Here is the function:
def satrn_encoder__forward(
self,
feat: Tensor,
data_samples: List[TextRecogDataSample] = None) -> Tensor:
"""Forward propagation of encoder.
Args:
feat (Tensor): Feature tensor of shape :math:`(N, D_m, H, W)`.
data_samples (list[TextRecogDataSample]): Batch of
TextRecogDataSample, containing `valid_ratio` information.
Defaults to None.
Returns:
Tensor: A tensor of shape :math:`(N, T, D_m)`.
"""
valid_ratio = 1.0
feat = self.position_enc(feat)
n, c, h, w = feat.size()
mask = feat.new_zeros((n, h, w))
valid_width = min(w, math.ceil(w * valid_ratio))
mask[:, :, :valid_width] = 1
mask = mask.view(n, h * w)
feat = feat.view(n, c, h * w)
output = feat.permute(0, 2, 1).contiguous()
for enc_layer in self.layer_stack:
output = enc_layer(output, h, w, mask)
output = self.layer_norm(output)
return output | Forward propagation of encoder. Args: feat (Tensor): Feature tensor of shape :math:`(N, D_m, H, W)`. data_samples (list[TextRecogDataSample]): Batch of TextRecogDataSample, containing `valid_ratio` information. Defaults to None. Returns: Tensor: A tensor of shape :math:`(N, T, D_m)`. |
188,743 | import math
from typing import Sequence
import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `nrtr_decoder___get_source_mask` function. Write a Python function `def nrtr_decoder___get_source_mask( self, src_seq: torch.Tensor, valid_ratios: Sequence[float]) -> torch.Tensor` to solve the following problem:
Generate mask for source sequence. Args: src_seq (torch.Tensor): Image sequence. Shape :math:`(N, T, C)`. valid_ratios (list[float]): The valid ratio of input image. For example, if the width of the original image is w1 and the width after padding is w2, then valid_ratio = w1/w2. Source mask is used to cover the area of the padding region. Returns: Tensor or None: Source mask. Shape :math:`(N, T)`. The regions of the padding area are False, and the rest are True.
Here is the function:
def nrtr_decoder___get_source_mask(
self, src_seq: torch.Tensor,
valid_ratios: Sequence[float]) -> torch.Tensor:
"""Generate mask for source sequence.
Args:
src_seq (torch.Tensor): Image sequence. Shape :math:`(N, T, C)`.
valid_ratios (list[float]): The valid ratio of input image. For
example, if the width of the original image is w1 and the width
after padding is w2, then valid_ratio = w1/w2. Source mask is
used to cover the area of the padding region.
Returns:
Tensor or None: Source mask. Shape :math:`(N, T)`. The regions of
the padding area are False, and the rest are True.
"""
N, T, _ = src_seq.size()
mask = None
if len(valid_ratios) > 0:
mask = src_seq.new_zeros((N, T), device=src_seq.device)
valid_width = min(T, math.ceil(T * valid_ratios[0]))
mask[:, :valid_width] = 1
return mask | Generate mask for source sequence. Args: src_seq (torch.Tensor): Image sequence. Shape :math:`(N, T, C)`. valid_ratios (list[float]): The valid ratio of input image. For example, if the width of the original image is w1 and the width after padding is w2, then valid_ratio = w1/w2. Source mask is used to cover the area of the padding region. Returns: Tensor or None: Source mask. Shape :math:`(N, T)`. The regions of the padding area are False, and the rest are True. |
188,744 | import torch
from mmocr.structures import TextRecogDataSample
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `encoder_decoder_recognizer__forward` function. Write a Python function `def encoder_decoder_recognizer__forward(self, batch_inputs: torch.Tensor, data_samples: TextRecogDataSample, **kwargs) -> TextRecogDataSample` to solve the following problem:
Rewrite `forward` of EncoderDecoderRecognizer for default backend. Rewrite this function to return the results early and avoid post-processing. That process is not suitable for exporting to backends and is better implemented in the SDK. Args: ctx (ContextCaller): The context with additional information. self: The instance of the class EncoderDecoderRecognizer. batch_inputs (Tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. data_samples (TextRecogDataSample): Containing meta information and gold annotations for each of the images. Defaults to None. Returns: out_dec (Tensor): A feature map output from a decoder. The tensor shape (N, H, W).
Here is the function:
def encoder_decoder_recognizer__forward(self, batch_inputs: torch.Tensor,
data_samples: TextRecogDataSample,
**kwargs) -> TextRecogDataSample:
"""Rewrite `forward` of EncoderDecoderRecognizer for default backend.
Rewrite this function to return the results early and avoid post-processing.
That process is not suitable for exporting to backends and is better
implemented in the SDK.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the class
EncoderDecoderRecognizer.
batch_inputs (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
data_samples (TextRecogDataSample): Containing meta information
and gold annotations for each of the images. Defaults to None.
Returns:
out_dec (Tensor): A feature map output from a decoder. The tensor shape
(N, H, W).
"""
feat = self.extract_feat(batch_inputs)
out_enc = None
if self.with_encoder:
out_enc = self.encoder(feat, data_samples)
return self.decoder.predict(feat, out_enc, data_samples) | Rewrite `forward` of EncoderDecoderRecognizer for default backend. Rewrite this function to return the results early and avoid post-processing. That process is not suitable for exporting to backends and is better implemented in the SDK. Args: ctx (ContextCaller): The context with additional information. self: The instance of the class EncoderDecoderRecognizer. batch_inputs (Tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. data_samples (TextRecogDataSample): Containing meta information and gold annotations for each of the images. Defaults to None. Returns: out_dec (Tensor): A feature map output from a decoder. The tensor shape (N, H, W). |
188,745 | from typing import Union
import mmengine
from mmdeploy.utils import load_config
The provided code snippet includes necessary dependencies for implementing the `get_resize_ocr` function. Write a Python function `def get_resize_ocr(model_cfg: Union[str, mmengine.Config])` to solve the following problem:
Get the test settings of ResizeOCR in model config. Args: model_cfg (str | mmengine.Config): Model config file or loaded Config object. Returns: tuple, composed of min_width, max_width and keep_aspect_ratio.
Here is the function:
def get_resize_ocr(model_cfg: Union[str, mmengine.Config]):
"""Get the test settings of ResizeOCR in model config.
Args:
model_cfg (str | mmengine.Config): Model config file or loaded Config
object.
Returns:
tuple, composed of min_width, max_width and keep_aspect_ratio.
"""
model_cfg = load_config(model_cfg)[0]
from mmdet.datasets.pipelines import Compose
from mmocr.datasets import build_dataset # noqa: F401
test_pipeline = Compose(model_cfg.data.test.pipeline)
resize_ocr = test_pipeline.transforms[1].transforms.transforms[0]
return (resize_ocr.min_width, resize_ocr.max_width,
resize_ocr.keep_aspect_ratio) | Get the test settings of ResizeOCR in model config. Args: model_cfg (str | mmengine.Config): Model config file or loaded Config object. Returns: tuple, composed of min_width, max_width and keep_aspect_ratio. |
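A hypothetical call with a placeholder config path (the function expects an mmocr 0.x-style config exposing `data.test.pipeline`):
min_width, max_width, keep_aspect_ratio = get_resize_ocr(
    'configs/textrecog/crnn_config.py')  # placeholder path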
188,746 | from typing import Optional, Sequence, Union
import torch
from mmdet.structures import DetDataSample
from mmdet.structures import SampleList as MMDET_SampleList
from mmocr.structures import TextDetDataSample
from mmocr.utils.typing_utils import DetSampleList
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `mmdet_wrapper__forward` function. Write a Python function `def mmdet_wrapper__forward(self, inputs: torch.Tensor, data_samples: Optional[Union[ DetSampleList, MMDET_SampleList]] = None, mode: str = 'tensor', **kwargs) -> Sequence[TextDetDataSample]` to solve the following problem:
The unified entry for a forward process in both training and test. The method works in three modes: "tensor", "predict" and "loss": - "tensor": Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - "predict": Forward and return the predictions, which are fully processed to a list of :obj:`DetDataSample`. - "loss": Forward and return a dict of losses according to the given inputs and data samples. Note that this method doesn't handle either back propagation or parameter update, which are supposed to be done in :meth:`train_step`. Args: inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (list[:obj:`DetDataSample`] or list[:obj:`TextDetDataSample`]): The annotation data of every sample. When in "predict" mode, it should be a list of :obj:`TextDetDataSample`. Otherwise they are :obj:`DetDataSample`s. Defaults to None. mode (str): Running mode. Defaults to 'tensor'. Returns: results (Sequence(torch.Tensor)): Output of MMDet models.
Here is the function:
def mmdet_wrapper__forward(self,
inputs: torch.Tensor,
data_samples: Optional[Union[
DetSampleList, MMDET_SampleList]] = None,
mode: str = 'tensor',
**kwargs) -> Sequence[TextDetDataSample]:
"""The unified entry for a forward process in both training and test.
The method works in three modes: "tensor", "predict" and "loss":
- "tensor": Forward the whole network and return tensor or tuple of
tensor without any post-processing, same as a common nn.Module.
- "predict": Forward and return the predictions, which are fully
processed to a list of :obj:`DetDataSample`.
- "loss": Forward and return a dict of losses according to the given
inputs and data samples.
Note that this method doesn't handle either back propagation or
parameter update, which are supposed to be done in :meth:`train_step`.
Args:
inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (list[:obj:`DetDataSample`] or
list[:obj:`TextDetDataSample`]): The annotation data of every
sample. When in "predict" mode, it should be a list of
:obj:`TextDetDataSample`. Otherwise they are
:obj:`DetDataSample`s. Defaults to None.
mode (str): Running mode. Defaults to 'tensor'.
Returns:
results (Sequence(torch.Tensor)): Output of MMDet models.
"""
if mode == 'predict':
ocr_data_samples = data_samples
data_samples = []
for i in range(len(ocr_data_samples)):
data_samples.append(
DetDataSample(metainfo=ocr_data_samples[i].metainfo))
results = self.wrapped_model.forward(inputs, data_samples, mode, **kwargs)
return results | The unified entry for a forward process in both training and test. The method works in three modes: "tensor", "predict" and "loss": - "tensor": Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - "predict": Forward and return the predictions, which are fully processed to a list of :obj:`DetDataSample`. - "loss": Forward and return a dict of losses according to the given inputs and data samples. Note that this method doesn't handle either back propagation or parameter update, which are supposed to be done in :meth:`train_step`. Args: inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (list[:obj:`DetDataSample`] or list[:obj:`TextDetDataSample`]): The annotation data of every sample. When in "predict" mode, it should be a list of :obj:`TextDetDataSample`. Otherwise they are :obj:`DetDataSample`s. Defaults to None. mode (str): Running mode. Defaults to 'tensor'. Returns: results (Sequence(torch.Tensor)): Output of MMDet models. |
188,747 | from typing import Sequence
import torch
from mmocr.structures import TextDetDataSample
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `single_stage_text_detector__forward` function. Write a Python function `def single_stage_text_detector__forward( self, batch_inputs: torch.Tensor, data_samples: TextDetDataSample = None, **kwargs) -> Sequence[TextDetDataSample]` to solve the following problem:
Predict results from a batch of inputs and data samples with post-processing. Args: batch_inputs (torch.Tensor): Images of shape (N, C, H, W). data_samples (list[TextDetDataSample]): A list of N datasamples, containing meta information and gold annotations for each of the images. Returns: list[TextDetDataSample]: A list of N datasamples of prediction results. Each DetDataSample usually contains 'pred_instances'. The ``pred_instances`` usually contains the following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arranged as (x1, y1, x2, y2). - polygons (list[np.ndarray]): The length is num_instances. Each element represents the polygon of the instance, in (xn, yn) order.
Here is the function:
def single_stage_text_detector__forward(
self,
batch_inputs: torch.Tensor,
data_samples: TextDetDataSample = None,
**kwargs) -> Sequence[TextDetDataSample]:
"""Predict results from a batch of inputs and data samples with post-
processing.
Args:
batch_inputs (torch.Tensor): Images of shape (N, C, H, W).
data_samples (list[TextDetDataSample]): A list of N
datasamples, containing meta information and gold annotations
for each of the images.
Returns:
list[TextDetDataSample]: A list of N datasamples of prediction
results. Each DetDataSample usually contains
'pred_instances'. The ``pred_instances`` usually
contains the following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arranged as (x1, y1, x2, y2).
- polygons (list[np.ndarray]): The length is num_instances.
Each element represents the polygon of the
instance, in (xn, yn) order.
"""
x = self.extract_feat(batch_inputs)
return self.det_head.predict(x, data_samples) | Predict results from a batch of inputs and data samples with post-processing. Args: batch_inputs (torch.Tensor): Images of shape (N, C, H, W). data_samples (list[TextDetDataSample]): A list of N datasamples, containing meta information and gold annotations for each of the images. Returns: list[TextDetDataSample]: A list of N datasamples of prediction results. Each DetDataSample usually contains 'pred_instances'. The ``pred_instances`` usually contains the following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arranged as (x1, y1, x2, y2). - polygons (list[np.ndarray]): The length is num_instances. Each element represents the polygon of the instance, in (xn, yn) order. |
188,748 | from typing import Dict
import torch
from mmocr.utils import DetSampleList
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `base_text_det_head__predict` function. Write a Python function `def base_text_det_head__predict( self, x: torch.Tensor, batch_data_samples: DetSampleList) -> DetSampleList` to solve the following problem:
Rewrite `predict` of BaseTextDetHead for default backend. Rewrite this function to return the results early and avoid post-processing. That process is not suitable for exporting to backends and is better implemented in the SDK. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: SampleList: Detection results of each image after the post process.
Here is the function:
def base_text_det_head__predict(
self, x: torch.Tensor,
batch_data_samples: DetSampleList) -> DetSampleList:
"""Rewrite `predict` of BaseTextDetHead for default backend.
Rewrite this function to return the results early and avoid post-processing.
That process is not suitable for exporting to backends and is better
implemented in the SDK.
Args:
x (tuple[Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
SampleList: Detection results of each image
after the post process.
"""
outs = self(x, batch_data_samples)
# early return to avoid decoding outputs from heads to boundaries.
if isinstance(outs, Dict):
return torch.cat([value.unsqueeze(1) for value in outs.values()], 1)
return outs | Rewrite `predict` of BaseTextDetHead for default backend. Rewrite this function to return the results early and avoid post-processing. That process is not suitable for exporting to backends and is better implemented in the SDK. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: SampleList: Detection results of each image after the post process. |
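The early return above packs a dict of per-pixel maps into a single tensor for export; a small sketch of that packing with hypothetical keys and sizes:
import torch
outs = {'prob_map': torch.rand(1, 32, 32), 'thr_map': torch.rand(1, 32, 32)}
# each (N, H, W) map gains a channel dim, then all maps concatenate on dim 1
packed = torch.cat([value.unsqueeze(1) for value in outs.values()], 1)
print(packed.shape)  # torch.Size([1, 2, 32, 32])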
188,749 | from typing import Dict
import torch
from mmocr.utils import DetSampleList
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `db_head__predict` function. Write a Python function `def db_head__predict(self, x: torch.Tensor, batch_data_samples: DetSampleList) -> DetSampleList` to solve the following problem:
Rewrite to avoid post-process of text detection head. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: SampleList: Detection results of each image after the post process.
Here is the function:
def db_head__predict(self, x: torch.Tensor,
batch_data_samples: DetSampleList) -> DetSampleList:
"""Rewrite to avoid post-process of text detection head.
Args:
x (tuple[Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
SampleList: Detection results of each image
after the post process.
"""
outs = self(x, batch_data_samples, mode='predict')
return outs | Rewrite to skip the post-processing of the text detection head. Args: x (tuple[Tensor]): Multi-level features from the upstream network, each is a 4D-tensor. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: SampleList: Detection results of each image after the post process. |
188,750 | import torch
import torch.nn.functional as F
from packaging import version
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `fpnc__forward__tensorrt` function. Write a Python function `def fpnc__forward__tensorrt(self, inputs, **kwargs)` to solve the following problem:
Rewrite `forward` of FPNC for tensorrt backend. Rewrite this function to replace nearest upsampling with bilinear upsampling. The TensorRT-7 backend applies a different nearest-sampling strategy from PyTorch, which heavily influences the final performance. Args: ctx (ContextCaller): The context with additional information. self: The instance of the class FPNC. inputs (Sequence[Tensor]): The feature maps for each scale level with shape (N, num_anchors * num_classes, H, W) Returns: outs (Tensor): A feature map output from FPNC. The tensor shape (N, C, H, W).
Here is the function:
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmocr.models.textdet.FPNC.forward', backend='tensorrt')
def fpnc__forward__tensorrt(self, inputs, **kwargs):
"""Rewrite `forward` of FPNC for tensorrt backend.
Rewrite this function to replace nearest upsampling with bilinear
upsampling. The TensorRT-7 backend applies a different nearest-sampling
strategy from PyTorch, which heavily influences the final performance.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the class FPNC.
inputs (Sequence[Tensor]): The feature maps for each scale level with
shape (N, num_anchors * num_classes, H, W)
Returns:
outs (Tensor): A feature map output from FPNC. The tensor shape
(N, C, H, W).
"""
# TensorRT version 8+ matches the upsampling with pytorch
import tensorrt as trt
apply_rewrite = version.parse(trt.__version__) < version.parse('8')
mode = 'bilinear' if apply_rewrite else 'nearest'
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i])
for i, lateral_conv in enumerate(self.lateral_convs)
]
used_backbone_levels = len(laterals)
# build top-down path
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] += F.interpolate(
laterals[i], size=prev_shape, mode=mode)
# build outputs
# part 1: from original levels
outs = [
self.smooth_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
for i, out in enumerate(outs):
outs[i] = F.interpolate(outs[i], size=outs[0].shape[2:], mode=mode)
out = torch.cat(outs, dim=1)
if self.conv_after_concat:
out = self.out_conv(out)
return out | Rewrite `forward` of FPNC for tensorrt backend. Rewrite this function to replace nearest upsampling with bilinear upsampling. The TensorRT-7 backend applies a different nearest-sampling strategy from PyTorch, which heavily influences the final performance. Args: ctx (ContextCaller): The context with additional information. self: The instance of the class FPNC. inputs (Sequence[Tensor]): The feature maps for each scale level with shape (N, num_anchors * num_classes, H, W) Returns: outs (Tensor): A feature map output from FPNC. The tensor shape (N, C, H, W). |
188,751 | from typing import List, Optional, Sequence, Union
import torch
from mmengine import Config
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmengine.structures import BaseDataElement, PixelData
from torch import nn
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
get_root_logger, load_config)
__BACKEND_MODEL = Registry('backend_segmentors')
The provided code snippet includes necessary dependencies for implementing the `build_segmentation_model` function. Write a Python function `def build_segmentation_model( model_files: Sequence[str], model_cfg: Union[str, Config], deploy_cfg: Union[str, Config], device: str, data_preprocessor: Optional[Union[Config, BaseDataPreprocessor]] = None, **kwargs)` to solve the following problem:
Build object segmentation model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | mmengine.Config): Input model config file or Config object. deploy_cfg (str | mmengine.Config): Input deployment config file or Config object. device (str): Device to input model. data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: BaseBackendModel: Segmentor for a configured backend.
Here is the function:
def build_segmentation_model(
model_files: Sequence[str],
model_cfg: Union[str, Config],
deploy_cfg: Union[str, Config],
device: str,
data_preprocessor: Optional[Union[Config,
BaseDataPreprocessor]] = None,
**kwargs):
"""Build object segmentation model for different backends.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | mmengine.Config): Input model config file or Config
object.
deploy_cfg (str | mmengine.Config): Input deployment config file or
Config object.
device (str): Device to input model.
data_preprocessor (BaseDataPreprocessor | Config): The data
preprocessor of the model.
Returns:
BaseBackendModel: Segmentor for a configured backend.
"""
# load cfg if necessary
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
backend = get_backend(deploy_cfg)
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
backend_segmentor = __BACKEND_MODEL.build(
dict(
type=model_type,
backend=backend,
backend_files=model_files,
device=device,
deploy_cfg=deploy_cfg,
data_preprocessor=data_preprocessor,
**kwargs))
return backend_segmentor | Build object segmentation model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | mmengine.Config): Input model config file or Config object. deploy_cfg (str | mmengine.Config): Input deployment config file or Config object. device (str): Device to input model. data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: BaseBackendModel: Segmentor for a configured backend. |
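A hypothetical invocation of the builder above; all paths and the device below are placeholders, not verified files:
segmentor = build_segmentation_model(
    model_files=['end2end.onnx'],          # placeholder backend artifact
    model_cfg='path/to/model_cfg.py',      # placeholder mmseg config
    deploy_cfg='path/to/deploy_cfg.py',    # placeholder deploy config
    device='cpu')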
188,752 | import os.path as osp
from collections import defaultdict
from copy import deepcopy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import mmcv
import mmengine
import numpy as np
import torch
from mmengine import Config
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import (Codebase, Task, get_codebase_config,
get_input_shape, get_root_logger)
The provided code snippet includes necessary dependencies for implementing the `process_model_config` function. Write a Python function `def process_model_config(model_cfg: mmengine.Config, imgs: Union[Sequence[str], Sequence[np.ndarray]], input_shape: Optional[Sequence[int]] = None)` to solve the following problem:
Process the model config. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data types are List[str] and List[np.ndarray]. input_shape (list[int]): A list of two integers in (width, height) format specifying the input shape. Default: None. Returns: mmengine.Config: the model config after processing.
Here is the function:
def process_model_config(model_cfg: mmengine.Config,
imgs: Union[Sequence[str], Sequence[np.ndarray]],
input_shape: Optional[Sequence[int]] = None):
"""Process the model config.
Args:
model_cfg (mmengine.Config): The model config.
imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted
data types are List[str] and List[np.ndarray].
input_shape (list[int]): A list of two integers in (width, height)
format specifying the input shape. Default: None.
Returns:
mmengine.Config: the model config after processing.
"""
cfg = deepcopy(model_cfg)
if isinstance(imgs[0], np.ndarray):
# set loading pipeline type
cfg.test_pipeline[0].type = 'LoadImageFromNDArray'
# remove some training related pipeline
removed_indices = []
for i in range(len(cfg.test_pipeline)):
if cfg.test_pipeline[i]['type'] in ['LoadAnnotations']:
removed_indices.append(i)
for i in reversed(removed_indices):
cfg.test_pipeline.pop(i)
# for static exporting
if input_shape is not None:
found_resize = False
for i in range(len(cfg.test_pipeline)):
if 'Resize' == cfg.test_pipeline[i]['type']:
cfg.test_pipeline[i]['scale'] = tuple(input_shape)
cfg.test_pipeline[i]['keep_ratio'] = False
found_resize = True
if not found_resize:
logger = get_root_logger()
logger.warning(
f'Not found Resize in test_pipeline: {cfg.test_pipeline}')
return cfg | Process the model config. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data types are List[str] and List[np.ndarray]. input_shape (list[int]): A list of two integers in (width, height) format specifying the input shape. Default: None. Returns: mmengine.Config: the model config after processing. |
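A hypothetical call forcing a static input shape on an in-memory image; `model_cfg` is assumed to be an already-loaded mmengine.Config, and the sizes are placeholders:
import numpy as np
img = np.zeros((480, 640, 3), dtype=np.uint8)
cfg = process_model_config(model_cfg, [img], input_shape=[640, 640])
# cfg.test_pipeline now loads from an ndarray, drops LoadAnnotations, and
# resizes to a fixed 640x640 with keep_ratio disabled.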
188,753 | import os.path as osp
from collections import defaultdict
from copy import deepcopy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import mmcv
import mmengine
import numpy as np
import torch
from mmengine import Config
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import (Codebase, Task, get_codebase_config,
get_input_shape, get_root_logger)
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_metainfo` function. Write a Python function `def _get_dataset_metainfo(model_cfg: Config)` to solve the following problem:
Get metainfo of dataset. Args: model_cfg (Config): Input model Config object. Returns: (list[str], list[np.ndarray]): Class names and palette.
Here is the function:
def _get_dataset_metainfo(model_cfg: Config):
"""Get metainfo of dataset.
Args:
model_cfg (Config): Input model Config object.
Returns:
(list[str], list[np.ndarray]): Class names and palette.
"""
from mmseg import datasets # noqa
from mmseg.registry import DATASETS
module_dict = DATASETS.module_dict
for dataloader_name in [
'test_dataloader', 'val_dataloader', 'train_dataloader'
]:
if dataloader_name not in model_cfg:
continue
dataloader_cfg = model_cfg[dataloader_name]
dataset_cfg = dataloader_cfg.dataset
dataset_mmseg = module_dict.get(dataset_cfg.type, None)
if dataset_mmseg is None:
continue
if hasattr(dataset_mmseg, '_load_metainfo') and isinstance(
dataset_mmseg._load_metainfo, Callable):
meta = dataset_mmseg._load_metainfo(
dataset_cfg.get('metainfo', None))
if meta is not None:
return meta
if hasattr(dataset_mmseg, 'METAINFO'):
return dataset_mmseg.METAINFO
return None | Get metainfo of dataset. Args: model_cfg (Config): Input model Config object. Returns: (list[str], list[np.ndarray]): Class names and palette. |
188,754 | from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_root_logger
TENSORRT_MAX_TOPK = 3840
The provided code snippet includes necessary dependencies for implementing the `point_head__get_points_test__tensorrt` function. Write a Python function `def point_head__get_points_test__tensorrt(self, seg_logits, uncertainty_func, cfg)` to solve the following problem:
Sample points for testing. 1. set `num_points` no greater than TENSORRT_MAX_TOPK for tensorrt backend Args: seg_logits (Tensor): A tensor of shape (batch_size, num_classes, height, width) for class-specific or class-agnostic prediction. uncertainty_func (func): uncertainty calculation function. cfg (dict): Testing config of point head. Returns: point_indices (Tensor): A tensor of shape (batch_size, num_points) that contains indices from [0, height x width) of the most uncertain points. point_coords (Tensor): A tensor of shape (batch_size, num_points, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the ``height x width`` grid .
Here is the function:
def point_head__get_points_test__tensorrt(self, seg_logits, uncertainty_func,
cfg):
"""Sample points for testing.
1. set `num_points` no greater than TENSORRT_MAX_TOPK for tensorrt backend
Args:
seg_logits (Tensor): A tensor of shape (batch_size, num_classes,
height, width) for class-specific or class-agnostic prediction.
uncertainty_func (func): uncertainty calculation function.
cfg (dict): Testing config of point head.
Returns:
point_indices (Tensor): A tensor of shape (batch_size, num_points)
that contains indices from [0, height x width) of the most
uncertain points.
point_coords (Tensor): A tensor of shape (batch_size, num_points,
2) that contains [0, 1] x [0, 1] normalized coordinates of the
most uncertain points from the ``height x width`` grid .
"""
ctx = FUNCTION_REWRITER.get_context()
from mmdeploy.utils.constants import TENSORRT_MAX_TOPK
if cfg.subdivision_num_points > TENSORRT_MAX_TOPK:
logger = get_root_logger()
logger.warning(f'cfg.subdivision_num_points would be changed from '
f'{cfg.subdivision_num_points} to {TENSORRT_MAX_TOPK} '
f'due to restriction in TensorRT TopK layer ')
cfg.subdivision_num_points = TENSORRT_MAX_TOPK
return ctx.origin_func(self, seg_logits, uncertainty_func, cfg) | Sample points for testing. 1. set `num_points` no greater than TENSORRT_MAX_TOPK for tensorrt backend Args: seg_logits (Tensor): A tensor of shape (batch_size, num_classes, height, width) for class-specific or class-agnostic prediction. uncertainty_func (func): uncertainty calculation function. cfg (dict): Testing config of point head. Returns: point_indices (Tensor): A tensor of shape (batch_size, num_points) that contains indices from [0, height x width) of the most uncertain points. point_coords (Tensor): A tensor of shape (batch_size, num_points, 2) that contains [0, 1] x [0, 1] normalized coordinates of the most uncertain points from the ``height x width`` grid . |
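The `ctx.origin_func` call above is the standard wrap-and-delegate pattern in mmdeploy rewrites: adjust an argument, then fall through to the original implementation. A generic sketch; the `func_name` target is illustrative, not a verified path:
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
    func_name='some.module.Head.get_points_test',  # illustrative target
    backend='tensorrt')
def clamped_get_points_test(self, *args, **kwargs):
    ctx = FUNCTION_REWRITER.get_context()
    # clamp or mutate arguments here, then delegate to the original function
    return ctx.origin_func(self, *args, **kwargs)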
188,755 | import torch
import torch.nn.functional as F
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `ema_module__forward` function. Write a Python function `def ema_module__forward(self, feats)` to solve the following problem:
Rewrite `forward` for default backend. Replace torch.einsum with other operations. Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. feats (Tensor): Input feature. Returns: torch.Tensor: Output feature.
Here is the function:
def ema_module__forward(self, feats):
"""Rewrite `forward` for default backend.
Replace torch.einsum with other operations.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
feats (Tensor): Input feature.
Returns:
torch.Tensor: Output feature.
"""
batch_size, channels, height, width = feats.size()
# [batch_size, channels, height*width]
feats = feats.view(batch_size, channels, height * width)
# [batch_size, channels, num_bases]
bases = self.bases.repeat(batch_size, 1, 1)
with torch.no_grad():
for i in range(self.num_stages):
# [batch_size, height*width, num_bases]
attention = torch.bmm(feats.transpose(1, 2), bases)
# attention = torch.einsum('bcn,bck->bnk', feats, bases)
attention = F.softmax(attention, dim=2)
# l1 norm
attention_normed = F.normalize(attention, dim=1, p=1)
# [batch_size, channels, num_bases]
bases = torch.bmm(feats, attention_normed)
# bases = torch.einsum('bcn,bnk->bck', feats, attention_normed)
# l2 norm
bases = F.normalize(bases, dim=1, p=2)
feats_recon = torch.bmm(bases, attention.transpose(1, 2))
# feats_recon = torch.einsum('bck,bnk->bcn', bases, attention)
feats_recon = feats_recon.view(batch_size, channels, height, width)
return feats_recon | Rewrite `forward` for default backend. Replace torch.einsum with other operations. Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. feats (Tensor): Input feature. Returns: torch.Tensor: Output feature. |
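The commented-out `einsum` lines above can be checked numerically against their `bmm` replacements; a short self-contained verification with hypothetical sizes:
import torch
b, c, n, k = 2, 4, 6, 3
feats = torch.randn(b, c, n)
bases = torch.randn(b, c, k)
# 'bcn,bck->bnk' is bmm over the shared channel dim after a transpose
attn = torch.einsum('bcn,bck->bnk', feats, bases)
assert torch.allclose(attn, torch.bmm(feats.transpose(1, 2), bases), atol=1e-5)
# 'bck,bnk->bcn' likewise reduces over k via a transposed bmm
recon = torch.einsum('bck,bnk->bcn', bases, attn)
assert torch.allclose(recon, torch.bmm(bases, attn.transpose(1, 2)), atol=1e-5)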
188,756 | from mmdeploy.core import FUNCTION_REWRITER, mark
The provided code snippet includes necessary dependencies for implementing the `base_decode_head__cls_seg__vacc` function. Write a Python function `def base_decode_head__cls_seg__vacc(self, feat)` to solve the following problem:
Classify each pixel.
Here is the function:
def base_decode_head__cls_seg__vacc(self, feat):
"""Classify each pixel."""
ctx = FUNCTION_REWRITER.get_context()
feat = ctx.origin_func(self, feat)
# mark seg_maps
@mark('seg_maps', outputs=['output'])
def __mark_feat(feat):
return feat
feat = __mark_feat(feat)
return feat | Classify each pixel. |
188,757 | from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `cascade_encoder_decoder__predict` function. Write a Python function `def cascade_encoder_decoder__predict(self, inputs, data_samples, **kwargs)` to solve the following problem:
Rewrite `predict` for default backend. 1. only support mode=`whole` inference 2. skip calling self.postprocess_result Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (SampleList): The seg data samples. Returns: torch.Tensor: Output segmentation logits of shape [N, C, H, W].
Here is the function:
def cascade_encoder_decoder__predict(self, inputs, data_samples, **kwargs):
"""Rewrite `predict` for default backend.
1. only support mode=`whole` inference
2. skip calling self.postprocess_result
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
inputs (Tensor): Inputs with shape (N, C, H, W).
data_samples (SampleList): The seg data samples.
Returns:
torch.Tensor: Output segmentation logits of shape [N, C, H, W].
"""
batch_img_metas = []
for data_sample in data_samples:
batch_img_metas.append(data_sample.metainfo)
x = self.extract_feat(inputs)
out = self.decode_head[0].forward(x)
for i in range(1, self.num_stages - 1):
out = self.decode_head[i].forward(x, out)
seg_logit = self.decode_head[-1].predict(x, out, batch_img_metas,
self.test_cfg)
return seg_logit | Rewrite `predict` for default backend. 1. only support mode=`whole` inference 2. skip calling self.postprocess_result Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (SampleList): The seg data samples. Returns: torch.Tensor: Output segmentation logits of shape [N, C, H, W]. |
188,758 | from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `encoder_decoder__predict` function. Write a Python function `def encoder_decoder__predict(self, inputs, data_samples, **kwargs)` to solve the following problem:
Rewrite `predict` for default backend. 1. only support mode=`whole` inference 2. skip calling self.postprocess_result Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (SampleList): The seg data samples. Returns: torch.Tensor: Output segmentation logits of shape [N, C, H, W].
Here is the function:
def encoder_decoder__predict(self, inputs, data_samples, **kwargs):
"""Rewrite `predict` for default backend.
1. only support mode=`whole` inference
2. skip calling self.postprocess_result
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
inputs (Tensor): Inputs with shape (N, C, H, W).
data_samples (SampleList): The seg data samples.
Returns:
torch.Tensor: Output segmentation logits of shape [N, C, H, W].
"""
batch_img_metas = []
for data_sample in data_samples:
batch_img_metas.append(data_sample.metainfo)
x = self.extract_feat(inputs)
seg_logit = self.decode_head.predict(x, batch_img_metas, self.test_cfg)
return seg_logit | Rewrite `predict` for default backend. 1. only support mode=`whole` inference 2. skip calling self.postprocess_result Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. inputs (Tensor): Inputs with shape (N, C, H, W). data_samples (SampleList): The seg data samples. Returns: torch.Tensor: Output segmentation logits of shape [N, C, H, W]. |
188,759 | import torch
from mmseg.structures import SegDataSample
from mmdeploy.core import FUNCTION_REWRITER, mark
from mmdeploy.utils import get_codebase_config, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `base_segmentor__forward` function. Write a Python function `def base_segmentor__forward(self, inputs, data_samples=None, mode='predict', **kwargs)` to solve the following problem:
Rewrite `forward` for default backend. Support configured dynamic/static shape for model input. Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. inputs (Tensor | List[Tensor]): Input image tensor(s). data_samples (List[dict]): List of dicts containing image's meta information such as `img_shape`. Returns: torch.Tensor: Output segmentation map of shape [N, 1, H, W].
Here is the function:
def base_segmentor__forward(self,
inputs,
data_samples=None,
mode='predict',
**kwargs):
"""Rewrite `forward` for default backend.
Support configured dynamic/static shape for model input.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
inputs (Tensor | List[Tensor]): Input image tensor(s).
data_samples (List[dict]): List of dicts containing image's meta
information such as `img_shape`.
Returns:
torch.Tensor: Output segmentation map of shape [N, 1, H, W].
"""
# mark seg_input
@mark('segmentor_forward', outputs=['input'])
def __mark_input(inputs):
return inputs
inputs = __mark_input(inputs)
ctx = FUNCTION_REWRITER.get_context()
if data_samples is None:
data_samples = [SegDataSample()]
deploy_cfg = ctx.cfg
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
# get origin input shape as tensor to support onnx dynamic shape
img_shape = inputs.shape[2:]
if not is_dynamic_flag:
img_shape = [int(val) for val in img_shape]
for data_sample in data_samples:
data_sample.set_field(
name='img_shape', value=img_shape, field_type='metainfo')
seg_logit = self.predict(inputs, data_samples)
# mark seg_head
@mark('decode_head', outputs=['output'])
def __mark_seg_logit(seg_logit):
return seg_logit
with_argmax = get_codebase_config(ctx.cfg).get('with_argmax', True)
# deal with out_channels=1 with two classes
if seg_logit.shape[1] == 1:
seg_logit = seg_logit.sigmoid()
seg_pred = seg_logit > self.decode_head.threshold
seg_pred = seg_pred.to(torch.int64)
else:
seg_pred = __mark_seg_logit(seg_logit)
if with_argmax:
seg_pred = seg_pred.argmax(dim=1, keepdim=True)
return seg_pred | Rewrite `forward` for default backend. Support configured dynamic/static shape for model input. Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. inputs (Tensor | List[Tensor]): Input image tensor(s). data_samples (List[dict]): List of dicts containing image's meta information such as `img_shape`. Returns: torch.Tensor: Output segmentation map of shape [N, 1, H, W].
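The out_channels=1 branch above can be exercised in isolation. A minimal sketch (the 0.3 threshold stands in for `self.decode_head.threshold` and is an assumption of this example):
import torch

seg_logit = torch.randn(1, 1, 4, 4)                    # single-channel logits
binary_pred = (seg_logit.sigmoid() > 0.3).to(torch.int64)
multi_logit = torch.randn(1, 3, 4, 4)                  # multi-class logits
multi_pred = multi_logit.argmax(dim=1, keepdim=True)   # with_argmax=True path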
188,760 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `up_conv_block__forward` function. Write a Python function `def up_conv_block__forward(self, skip, x)` to solve the following problem:
Rewrite `forward` for default backend. To support dynamic shape for UNet backbone, upsample feature maps with `size` instead of `scale_factor` Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. skip (Tensor): Skip branch feature. x (Tensor): Input feature to be upsampled. Returns: Tensor: Upsampled output feature map.
Here is the function:
def up_conv_block__forward(self, skip, x):
"""Rewrite `forward` for default backend.
To support dynamic shape for UNet backbone,
upsample feature maps with `size` instead of `scale_factor`
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the original class.
skip (Tensor): Skip branch feature.
x (Tensor): Input feature to be upsampled.
Returns:
Tensor: Upsampled output feature map.
"""
ctx = FUNCTION_REWRITER.get_context()
from mmcv.cnn import ConvModule
# only valid when self.upsample is from build_upsample_layer
if is_dynamic_shape(ctx.cfg) and not isinstance(self.upsample, ConvModule):
# upsample with `size` instead of `scale_factor`
from mmseg.models.utils import Upsample
for c in self.upsample.interp_upsample:
if isinstance(c, Upsample):
c.size = skip.shape[-2:]
c.scale_factor = None
x = self.upsample(x)
out = torch.cat([skip, x], dim=1)
out = self.conv_block(out)
return out | Rewrite `forward` for default backend. To support dynamic shape for UNet backbone, upsample feature maps with `size` instead of `scale_factor` Args: ctx (ContextCaller): The context with additional information. self: The instance of the original class. skip (Tensor): Skip branch feature. x (Tensor): Input feature to be upsampled. Returns: Tensor: Upsampled output feature map. |
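The reason the rewriter swaps `scale_factor` for `size` is that ONNX traces `scale_factor` as a constant, while `size` can come from another tensor at runtime. A minimal sketch of the resulting pattern:
import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 16, 16)
skip = torch.randn(1, 8, 33, 33)  # odd size that scale_factor=2 cannot reproduce
up = F.interpolate(x, size=skip.shape[-2:], mode='bilinear', align_corners=False)
out = torch.cat([skip, up], dim=1)  # H and W now match exactly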
188,761 | import numpy as np
import torch
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
def _dist_torch(point1, point2):
"""Calculate the distance between two points.
Args:
point1 (torch.Tensor): shape(n, 2).
point2 (torch.Tensor): shape(n, 2).
Returns:
distance (torch.Tensor): shape(n, 1).
"""
return torch.norm(point1 - point2, dim=-1)
@FUNCTION_REWRITER.register_rewriter(
    'mmrotate.structures.bbox.box_converters.qbox2rbox')
The provided code snippet includes necessary dependencies for implementing the `qbox2rbox__default` function. Write a Python function `def qbox2rbox__default(boxes: Tensor) -> Tensor` to solve the following problem:
Convert quadrilateral boxes to rotated boxes. Implement with PyTorch.
Here is the function:
def qbox2rbox__default(boxes: Tensor) -> Tensor:
"""Convert quadrilateral boxes to rotated boxes.
Implement with PyTorch.
"""
polys = boxes
points = torch.reshape(polys, [*polys.shape[:-1], 4, 2])
cxs = torch.unsqueeze(torch.sum(points[..., 0], axis=-1), axis=-1) * 0.25
cys = torch.unsqueeze(torch.sum(points[..., 1], axis=-1), axis=-1) * 0.25
_ws = torch.unsqueeze(
_dist_torch(points[..., 0, :], points[..., 1, :]), axis=-1)
_hs = torch.unsqueeze(
_dist_torch(points[..., 1, :], points[..., 2, :]), axis=-1)
_thetas = torch.unsqueeze(
torch.atan2(-(points[..., 1, 0] - points[..., 0, 0]),
points[..., 1, 1] - points[..., 0, 1]),
axis=-1)
odd = torch.eq(torch.remainder((_thetas / (np.pi * 0.5)).floor_(), 2), 0)
ws = torch.where(odd, _hs, _ws)
hs = torch.where(odd, _ws, _hs)
thetas = torch.remainder(_thetas, np.pi * 0.5)
rbboxes = torch.cat([cxs, cys, ws, hs, thetas], axis=-1)
return rbboxes | Convert quadrilateral boxes to rotated boxes. Implement with PyTorch. |
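A usage sketch for the rewriter above, calling it directly on an axis-aligned 2x1 rectangle given as a quadrilateral:
import torch

quad = torch.tensor([[0., 0., 2., 0., 2., 1., 0., 1.]])  # (x1, y1, ..., x4, y4)
rbox = qbox2rbox__default(quad)
# tensor([[1.0, 0.5, 2.0, 1.0, 0.0]]) -> center (1, 0.5), sides 2 and 1, theta 0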
188,762 | import copy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import Codebase, Task
from mmdeploy.utils.config_utils import get_input_shape, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `replace_RResize` function. Write a Python function `def replace_RResize(pipelines)` to solve the following problem:
Rename RResize to Resize. Args: pipelines (list[dict]): Data pipeline configs. Returns: list: The new pipeline list with all RResize renamed to Resize.
Here is the function:
def replace_RResize(pipelines):
"""Rename RResize to Resize.
Args:
pipelines (list[dict]): Data pipeline configs.
Returns:
list: The new pipeline list with all RResize renamed to
Resize.
"""
pipelines = copy.deepcopy(pipelines)
for i, pipeline in enumerate(pipelines):
if pipeline['type'] == 'MultiScaleFlipAug':
assert 'transforms' in pipeline
pipeline['transforms'] = replace_RResize(pipeline['transforms'])
elif pipeline.type == 'RResize':
pipelines[i].type = 'Resize'
if 'keep_ratio' not in pipelines[i]:
pipelines[i]['keep_ratio'] = True # default value
return pipelines | Rename RResize to Resize. Args: pipelines (list[dict]): Data pipeline configs. Returns: list: The new pipeline list with all RResize renamed to Resize.
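A usage sketch for the helper above. Config-style dicts are required because the function mixes `pipeline['type']` with `pipeline.type` access; plain dicts would fail on the latter:
from mmengine import ConfigDict

pipelines = [
    ConfigDict(type='LoadImageFromFile'),
    ConfigDict(type='RResize', scale=(1024, 1024)),
]
new_pipelines = replace_RResize(pipelines)
# new_pipelines[1] is now type='Resize' with keep_ratio=True filled in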
188,763 | import copy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import Codebase, Task
from mmdeploy.utils.config_utils import get_input_shape, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `process_model_config` function. Write a Python function `def process_model_config(model_cfg: Config, imgs: Union[Sequence[str], Sequence[np.ndarray]], input_shape: Optional[Sequence[int]] = None)` to solve the following problem:
Process the model config. Args: model_cfg (Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data type are List[str], List[np.ndarray]. input_shape (list[int]): A list of two integer in (width, height) format specifying input shape. Default: None. Returns: Config: the model config after processing.
Here is the function:
def process_model_config(model_cfg: Config,
imgs: Union[Sequence[str], Sequence[np.ndarray]],
input_shape: Optional[Sequence[int]] = None):
"""Process the model config.
Args:
model_cfg (Config): The model config.
imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted
data type are List[str], List[np.ndarray].
input_shape (list[int]): A list of two integers in (width, height)
format specifying input shape. Default: None.
Returns:
Config: the model config after processing.
"""
cfg = model_cfg.copy()
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
# set loading pipeline type
cfg.test_pipeline[0].type = 'mmdet.LoadImageFromNDArray'
pipeline = cfg.test_pipeline
# for static exporting
if input_shape is not None:
for i, transform in enumerate(pipeline):
if transform.type in ['Resize', 'mmdet.Resize']:
pipeline[i].keep_ratio = False
pipeline[i].scale = tuple(input_shape)
pipeline = [
transform for transform in pipeline
if transform.type != 'LoadAnnotations'
]
cfg.test_pipeline = pipeline
return cfg | Process the model config. Args: model_cfg (Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted data type are List[str], List[np.ndarray]. input_shape (list[int]): A list of two integer in (width, height) format specifying input shape. Default: None. Returns: Config: the model config after processing. |
188,764 | import copy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import Codebase, Task
from mmdeploy.utils.config_utils import get_input_shape, is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_metainfo` function. Write a Python function `def _get_dataset_metainfo(model_cfg: Config)` to solve the following problem:
Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: list[str]: A list of string specifying names of different class.
Here is the function:
def _get_dataset_metainfo(model_cfg: Config):
"""Get metainfo of dataset.
Args:
model_cfg (Config): Input model Config object.
Returns:
list[str]: A list of string specifying names of different class.
"""
from mmrotate import datasets # noqa
from mmrotate.registry import DATASETS
module_dict = DATASETS.module_dict
for dataloader_name in [
'test_dataloader', 'val_dataloader', 'train_dataloader'
]:
if dataloader_name not in model_cfg:
continue
dataloader_cfg = model_cfg[dataloader_name]
dataset_cfg = dataloader_cfg.dataset
dataset_cls = module_dict.get(dataset_cfg.type, None)
if dataset_cls is None:
continue
if hasattr(dataset_cls, '_load_metainfo') and isinstance(
dataset_cls._load_metainfo, Callable):
meta = dataset_cls._load_metainfo(
dataset_cfg.get('metainfo', None))
if meta is not None:
return meta
if hasattr(dataset_cls, 'METAINFO'):
return dataset_cls.METAINFO
return None | Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: list[str]: A list of string specifying names of different class. |
188,765 | from typing import Any, List, Optional, Sequence, Union
import numpy as np
import torch
from mmdet.structures.bbox import scale_boxes
from mmengine import Config, Registry
from mmengine.model.base_model.data_preprocessor import BaseDataPreprocessor
from mmengine.structures import BaseDataElement, InstanceData
from mmrotate.structures.bbox import RotatedBoxes
from torch import nn
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
load_config)
__BACKEND_MODEL = Registry('backend_detectors')
The provided code snippet includes necessary dependencies for implementing the `build_rotated_detection_model` function. Write a Python function `def build_rotated_detection_model( model_files: Sequence[str], deploy_cfg: Union[str, Config], device: str, data_preprocessor: Optional[Union[Config, BaseDataPreprocessor]] = None, **kwargs)` to solve the following problem:
Build rotated detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model. Returns: BaseBackendModel: Rotated detector for a configured backend.
Here is the function:
def build_rotated_detection_model(
model_files: Sequence[str],
deploy_cfg: Union[str, Config],
device: str,
data_preprocessor: Optional[Union[Config,
BaseDataPreprocessor]] = None,
**kwargs):
"""Build rotated detection model for different backends.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | Config): Input model config file or Config
object.
deploy_cfg (str | Config): Input deployment config file or
Config object.
device (str): Device to input model.
Returns:
BaseBackendModel: Rotated detector for a configured backend.
"""
# load cfg if necessary
deploy_cfg, = load_config(deploy_cfg)
backend = get_backend(deploy_cfg)
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
backend_rotated_detector = __BACKEND_MODEL.build(
dict(
type=model_type,
backend=backend,
backend_files=model_files,
device=device,
deploy_cfg=deploy_cfg,
data_preprocessor=data_preprocessor,
**kwargs))
return backend_rotated_detector | Build rotated detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model. Returns: BaseBackendModel: Rotated detector for a configured backend. |
188,766 | from typing import List, Optional
import torch
from mmdet.structures.bbox import BaseBoxes, get_box_tensor
from mmengine import ConfigDict
from mmrotate.structures.bbox import rbox2hbox
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import (gather_topk,
get_post_processing_params,
pad_with_value_if_necessary)
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops import multiclass_nms
from mmdeploy.utils import is_dynamic_shape
The provided code snippet includes necessary dependencies for implementing the `rpn_head__predict_by_feat` function. Write a Python function `def rpn_head__predict_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], score_factors: Optional[List[Tensor]] = None, batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True, **kwargs)` to solve the following problem:
Rewrite `predict_by_feat` of `OrientedRPNHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness
Here is the function:
def rpn_head__predict_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True,
**kwargs):
"""Rewrite `predict_by_feat` of `OrientedRPNHead` for default backend.
Rewrite this function to deploy model, transform network output for a
batch into bbox predictions.
Args:
ctx (ContextCaller): The context with additional information.
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
score_factors (list[Tensor], optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Defaults to None.
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
If with_nms == True:
tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels),
`dets` of shape [N, num_det, 5] and `labels` of shape
[N, num_det].
Else:
tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes,
batch_mlvl_scores, batch_mlvl_centerness
"""
ctx = FUNCTION_REWRITER.get_context()
img_metas = batch_img_metas
assert len(cls_scores) == len(bbox_preds)
deploy_cfg = ctx.cfg
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
num_levels = len(cls_scores)
device = cls_scores[0].device
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
mlvl_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device=device)
# anchor could be subclass of BaseBoxes in mmrotate
prior_type = type(mlvl_anchors[0])
mlvl_anchors = [get_box_tensor(priors) for priors in mlvl_anchors]
mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
assert len(mlvl_cls_scores) == len(mlvl_bbox_preds) == len(mlvl_anchors)
cfg = self.test_cfg if cfg is None else cfg
batch_size = mlvl_cls_scores[0].shape[0]
pre_topk = cfg.get('nms_pre', -1)
# loop over features, decode boxes
mlvl_valid_bboxes = []
mlvl_scores = []
mlvl_valid_anchors = []
for level_id, cls_score, bbox_pred, anchors in zip(
range(num_levels), mlvl_cls_scores, mlvl_bbox_preds, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(0, 2, 3, 1)
if self.use_sigmoid_cls:
cls_score = cls_score.reshape(batch_size, -1)
scores = cls_score.sigmoid()
else:
cls_score = cls_score.reshape(batch_size, -1, 2)
# We set FG labels to [0, num_class-1] and BG label to
# num_class in RPN head since mmdet v2.5, which is unified to
# be consistent with other head since mmdet v2.0. In mmdet v2.0
# to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
scores = cls_score.softmax(-1)[..., 0]
scores = scores.reshape(batch_size, -1, 1)
dim = self.bbox_coder.encode_size
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, dim)
# use static anchor if input shape is static
if not is_dynamic_flag:
anchors = anchors.data
anchors = anchors.unsqueeze(0)
# topk in tensorrt does not support shape<k
# concate zero to enable topk,
scores = pad_with_value_if_necessary(scores, 1, pre_topk, 0.)
bbox_pred = pad_with_value_if_necessary(bbox_pred, 1, pre_topk)
anchors = pad_with_value_if_necessary(anchors, 1, pre_topk)
if pre_topk > 0:
_, topk_inds = scores.squeeze(2).topk(pre_topk)
bbox_pred, scores = gather_topk(
bbox_pred,
scores,
inds=topk_inds,
batch_size=batch_size,
is_batched=True)
anchors = gather_topk(
anchors,
inds=topk_inds,
batch_size=batch_size,
is_batched=False)
mlvl_valid_bboxes.append(bbox_pred)
mlvl_scores.append(scores)
mlvl_valid_anchors.append(anchors)
batch_mlvl_bboxes = torch.cat(mlvl_valid_bboxes, dim=1)
batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
batch_mlvl_anchors = torch.cat(mlvl_valid_anchors, dim=1)
if issubclass(prior_type, BaseBoxes):
batch_mlvl_anchors = prior_type(batch_mlvl_anchors, clone=False)
batch_mlvl_bboxes = self.bbox_coder.decode(
batch_mlvl_anchors,
batch_mlvl_bboxes,
max_shape=img_metas[0]['img_shape'])
batch_mlvl_bboxes = get_box_tensor(batch_mlvl_bboxes)
# ignore background class
if not self.use_sigmoid_cls:
batch_mlvl_scores = batch_mlvl_scores[..., :self.num_classes]
if not with_nms:
return batch_mlvl_bboxes, batch_mlvl_scores
post_params = get_post_processing_params(deploy_cfg)
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
# only one class in rpn
max_output_boxes_per_class = keep_top_k
nms_type = cfg.nms.get('type')
hbboxes = rbox2hbox(batch_mlvl_bboxes)
dets, labels, index = multiclass_nms(
hbboxes,
batch_mlvl_scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k,
output_index=True)
dets = torch.cat([batch_mlvl_bboxes, batch_mlvl_scores], dim=-1)
# in case index == -1
dets = torch.cat([dets, dets[:, :1, :] * 0], dim=1)
batch_inds = torch.arange(batch_size, device=device).view(-1, 1)
dets = dets[batch_inds, index, :]
return dets, labels | Rewrite `predict_by_feat` of `OrientedRPNHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: ctx (ContextCaller): The context with additional information. cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: If with_nms == True: tuple[Tensor, Tensor]: tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5] and `labels` of shape [N, num_det]. Else: tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness |
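The zero-row concatenation near the end relies on negative indexing: after a dummy all-zero detection is appended, any `index == -1` emitted by the NMS op selects that row instead of a stale real detection. A standalone sketch of the trick (the `index` values are made up):
import torch

dets = torch.arange(12, dtype=torch.float32).view(1, 3, 4)  # 3 fake detections
index = torch.tensor([[0, 2, -1]])                          # -1 means 'no box'
dets = torch.cat([dets, dets[:, :1, :] * 0], dim=1)         # append a zero row
batch_inds = torch.arange(1).view(-1, 1)
picked = dets[batch_inds, index, :]
# picked[0, 2] is all zeros rather than the last real detection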
188,767 | from typing import List, Optional, Tuple
import torch
from mmengine.config import ConfigDict
from mmrotate.structures import norm_angle
from torch import Tensor
from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops.nms_rotated import multiclass_nms_rotated
import mmdeploy.mmcv.ops.nms_rotated
def multiclass_nms_rotated(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.1,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1):
"""Wrapper function for `_multiclass_nms`."""
return mmdeploy.mmcv.ops.nms_rotated._multiclass_nms_rotated(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k)
The provided code snippet includes necessary dependencies for implementing the `rotated_rtmdet_head__predict_by_feat` function. Write a Python function `def rotated_rtmdet_head__predict_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], angle_preds: List[Tensor], batch_img_metas: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> Tuple[Tensor]` to solve the following problem:
Rewrite `predict_by_feat` of `Rotated RTMDet` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). angle_preds (list[Tensor]): Box angle for each scale level with shape (batch_size, num_priors * angle_dim, H, W) batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor, Tensor]: The first item is an (N, num_box, 6) tensor, where 6 represent (x, y, w, h, angle, score), N is batch size and the score is between 0 and 1. The shape of the second tensor in the tuple is (N, num_box), and each element represents the class label of the corresponding box.
Here is the function:
def rotated_rtmdet_head__predict_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
angle_preds: List[Tensor],
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = False,
with_nms: bool = True) -> Tuple[Tensor]:
"""Rewrite `predict_by_feat` of `Rotated RTMDet` for default backend.
Rewrite this function to deploy model, transform network output for a
batch into bbox predictions.
Args:
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
angle_preds (list[Tensor]): Box angle for each scale level
with shape (batch_size, num_priors * angle_dim, H, W)
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
tuple[Tensor, Tensor]: The first item is an (N, num_box, 6) tensor,
where 6 represent (x, y, w, h, angle, score), N is batch
size and the score is between 0 and 1. The shape of the second
tensor in the tuple is (N, num_box), and each element
represents the class label of the corresponding box.
"""
ctx = FUNCTION_REWRITER.get_context()
assert len(cls_scores) == len(bbox_preds)
device = cls_scores[0].device
cfg = self.test_cfg if cfg is None else cfg
batch_size = bbox_preds[0].shape[0]
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, device=device)
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
for bbox_pred in bbox_preds
]
flatten_angle_preds = [
angle_pred.permute(0, 2, 3, 1).reshape(batch_size, -1,
self.angle_coder.encode_size)
for angle_pred in angle_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
flatten_angle_preds = torch.cat(flatten_angle_preds, dim=1)
priors = torch.cat(mlvl_priors)
angle = self.angle_coder.decode(flatten_angle_preds, keepdim=True)
distance = flatten_bbox_preds
cos_angle, sin_angle = torch.cos(angle), torch.sin(angle)
rot_matrix = torch.cat([cos_angle, -sin_angle, sin_angle, cos_angle],
dim=-1)
rot_matrix = rot_matrix.reshape(*rot_matrix.shape[:-1], 2, 2)
wh = distance[..., :2] + distance[..., 2:]
offset_t = (distance[..., 2:] - distance[..., :2]) / 2
offset_t = offset_t.unsqueeze(-1)
offset = torch.matmul(rot_matrix, offset_t).squeeze(-1)
ctr = priors[..., :2] + offset
angle_regular = norm_angle(angle, self.angle_version)
bboxes = torch.cat([ctr, wh, angle_regular], dim=-1)
# directly multiply score factor and feed to nms
max_scores, _ = torch.max(flatten_cls_scores, 1)
mask = max_scores >= cfg.score_thr
scores = flatten_cls_scores.where(mask, flatten_cls_scores.new_zeros(1))
if not with_nms:
return bboxes, scores
deploy_cfg = ctx.cfg
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
return multiclass_nms_rotated(bboxes, scores, max_output_boxes_per_class,
iou_threshold, score_threshold, pre_top_k,
keep_top_k) | Rewrite `predict_by_feat` of `Rotated RTMDet` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. Args: cls_scores (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). angle_preds (list[Tensor]): Box angle for each scale level with shape (batch_size, num_priors * angle_dim, H, W) batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: tuple[Tensor, Tensor]: The first item is an (N, num_box, 6) tensor, where 6 represent (x, y, w, h, angle, score), N is batch size and the score is between 0 and 1. The shape of the second tensor in the tuple is (N, num_box), and each element represents the class label of the corresponding box.
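The box decode above turns (left, top, right, bottom) distances plus a decoded angle into a rotated box by rotating the center offset. A standalone sketch for a single prior (all values arbitrary):
import torch

prior = torch.tensor([10., 10.])            # prior center (x, y)
distance = torch.tensor([1., 1., 3., 3.])   # left, top, right, bottom
angle = torch.tensor([0.5])
cos_a, sin_a = torch.cos(angle), torch.sin(angle)
rot = torch.stack([torch.cat([cos_a, -sin_a]),
                   torch.cat([sin_a, cos_a])])       # 2x2 rotation matrix
wh = distance[:2] + distance[2:]                     # box width and height
offset = rot @ ((distance[2:] - distance[:2]) / 2)   # rotated center offset
ctr = prior + offset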
188,768 | import torch
from mmdet.structures.bbox import get_box_tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `gvfixcoder__decode` function. Write a Python function `def gvfixcoder__decode(self, hboxes, fix_deltas)` to solve the following problem:
Rewriter for GVFixCoder decode, supporting inputs with extra dimensions.
Here is the function:
def gvfixcoder__decode(self, hboxes, fix_deltas):
"""Rewriter for GVFixCoder decode, support more dimension input."""
assert hboxes.size(
-1) == 4, f'expect hboxes.size(-1)==4 get {hboxes.size(-1)}.'
hboxes = get_box_tensor(hboxes)
x1 = hboxes[..., 0::4]
y1 = hboxes[..., 1::4]
x2 = hboxes[..., 2::4]
y2 = hboxes[..., 3::4]
w = hboxes[..., 2::4] - hboxes[..., 0::4]
h = hboxes[..., 3::4] - hboxes[..., 1::4]
pred_t_x = x1 + w * fix_deltas[..., 0::4]
pred_r_y = y1 + h * fix_deltas[..., 1::4]
pred_d_x = x2 - w * fix_deltas[..., 2::4]
pred_l_y = y2 - h * fix_deltas[..., 3::4]
polys = torch.stack(
[pred_t_x, y1, x2, pred_r_y, pred_d_x, y2, x1, pred_l_y], dim=-1)
polys = polys.flatten(2)
return polys | Rewriter for GVFixCoder decode, supporting inputs with extra dimensions.
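A usage sketch for the decoder above; `self` is unused in the body, so None is passed purely for illustration. With zero deltas the polygon reproduces the corners of the input hbox:
import torch

hboxes = torch.tensor([[[0., 0., 4., 2.]]])   # (batch, num_boxes, 4)
fix_deltas = torch.zeros(1, 1, 4)
polys = gvfixcoder__decode(None, hboxes, fix_deltas)
# polys: shape (1, 1, 8), values (0, 0, 4, 0, 4, 2, 0, 2)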
188,769 | from mmcv.ops import RoIAlignRotated
from torch.autograd import Function
from mmdeploy.core.optimizers import mark
from mmdeploy.core.rewriters import FUNCTION_REWRITER
class MultiLevelRotatedRoiAlign(Function):
"""Create MMCVMultiLevelRotatedRoiAlign op.
This class is used to create a MultiLevelRotatedRoiAlign in ONNX for the
TensorRT backend.
"""
def __init__(self) -> None:
super().__init__()
@staticmethod
def symbolic(g, *args):
"""Symbolic function for creating onnx op."""
aligned = args[-1]
featmap_strides = args[-2]
finest_scale = args[-3]
roi_scale_factor = args[-4]
sampling_ratio = args[-5]
clockwise = args[-6]
output_size = args[-7]
inputs = args[:len(featmap_strides)]
rois = args[len(featmap_strides)]
return g.op(
'mmdeploy::MMCVMultiLevelRotatedRoiAlign',
rois,
*inputs,
output_height_i=output_size[1],
output_width_i=output_size[0],
clockwise_i=clockwise,
sampling_ratio_i=sampling_ratio,
roi_scale_factor_f=roi_scale_factor,
finest_scale_i=finest_scale,
featmap_strides_f=featmap_strides,
aligned_i=aligned)
@staticmethod
def forward(g, *args):
"""Run forward."""
# aligned = args[-1]
featmap_strides = args[-2]
# finest_scale = args[-3]
# roi_scale_factor = args[-4]
# sampling_ratio = args[-5]
output_size = args[-7]
inputs = args[:len(featmap_strides)]
rois = args[len(featmap_strides)]
num_proposals = rois.shape[0]
channel = inputs[0].shape[1]
return rois.new_zeros(
(num_proposals, channel, output_size[1], output_size[0]))
@FUNCTION_REWRITER.register_rewriter(
    'mmrotate.models.roi_heads.roi_extractors.'
    'rotate_single_level_roi_extractor.RotatedSingleRoIExtractor.forward',
    backend='tensorrt')
@mark(
    'rotated_roi_extractor', inputs=['feats', 'rois'], outputs=['bbox_feats'])
The provided code snippet includes necessary dependencies for implementing the `rotated_single_roi_extractor__forward__tensorrt` function. Write a Python function `def rotated_single_roi_extractor__forward__tensorrt(self, feats, rois, roi_scale_factor=None)` to solve the following problem:
Rewrite `forward` of `RotatedSingleRoIExtractor` for TensorRT backend. This function uses MMCVMultiLevelRoiAlign op for TensorRT deployment.
Here is the function:
def rotated_single_roi_extractor__forward__tensorrt(self,
feats,
rois,
roi_scale_factor=None):
"""Rewrite `forward` of `RotatedSingleRoIExtractor` for TensorRT backend.
This function uses MMCVMultiLevelRoiAlign op for TensorRT deployment.
"""
featmap_strides = self.featmap_strides
finest_scale = self.finest_scale
for roi_layer in self.roi_layers:
assert isinstance(roi_layer, RoIAlignRotated
), f'{type(roi_layer)} is not supported in TensorRT.'
roi_layer = self.roi_layers[0]
out_size = roi_layer.output_size
sampling_ratio = roi_layer.sampling_ratio
clockwise = roi_layer.clockwise
aligned = roi_layer.aligned
if roi_scale_factor is None:
roi_scale_factor = 1.0
featmap_strides = [float(s) for s in featmap_strides]
return MultiLevelRotatedRoiAlign.apply(*feats, rois, out_size, clockwise,
sampling_ratio, roi_scale_factor,
finest_scale, featmap_strides,
aligned) | Rewrite `forward` of `RotatedSingleRoIExtractor` for TensorRT backend. This function uses MMCVMultiLevelRoiAlign op for TensorRT deployment. |
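The symbolic/forward split above is the standard recipe for exporting a backend-specific op: `symbolic` records a node in a custom ONNX domain, while `forward` only fabricates an output of the right shape so tracing can proceed. A minimal sketch of the same pattern (the op name and domain below are illustrative, not a real mmdeploy op):
import torch
from torch.autograd import Function

class FakeRoiOp(Function):

    @staticmethod
    def symbolic(g, feats, rois):
        # recorded into the exported graph; a backend plugin implements it
        return g.op('custom_domain::FakeRoiAlign', feats, rois, output_size_i=7)

    @staticmethod
    def forward(ctx, feats, rois):
        # shape-only placeholder used during tracing
        return feats.new_zeros(rois.shape[0], feats.shape[1], 7, 7)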
188,770 | from typing import List, Optional, Tuple
import torch.nn.functional as F
from mmdet.structures.bbox import get_box_tensor
from mmdet.utils import InstanceList
from mmengine import ConfigDict
from mmrotate.structures.bbox import QuadriBoxes
from torch import Tensor
from mmdeploy.codebase.mmdet.deploy import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops import multiclass_nms
The provided code snippet includes necessary dependencies for implementing the `gv_bbox_head__predict_by_feat` function. Write a Python function `def gv_bbox_head__predict_by_feat(self, rois: Tuple[Tensor], cls_scores: Tuple[Tensor], bbox_preds: Tuple[Tensor], fix_preds: Tuple[Tensor], ratio_preds: Tuple[Tensor], batch_img_metas: List[dict], rcnn_test_cfg: Optional[ConfigDict] = None, rescale: bool = False) -> InstanceList` to solve the following problem:
Transform network output for a batch into bbox predictions. Args: rois (tuple[Tensor]): Tuple of boxes to be transformed. Each has shape (num_boxes, 5), the last dimension 5 arranged as (batch_index, x1, y1, x2, y2). cls_scores (tuple[Tensor]): Tuple of box scores, each has shape (num_boxes, num_classes + 1). bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each has shape (num_boxes, num_classes * 4). fix_preds (tuple[Tensor]): Tuple of fix / deltas, each has shape (num_boxes, num_classes * 4). ratio_preds (tuple[Tensor]): Tuple of ratio / deltas, each has shape (num_boxes, num_classes * 1). batch_img_metas (list[dict]): List of image information. rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Instance segmentation results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 8), the last dimension 8 arranged as (x1, y1, ..., x4, y4).
Here is the function:
def gv_bbox_head__predict_by_feat(self,
rois: Tuple[Tensor],
cls_scores: Tuple[Tensor],
bbox_preds: Tuple[Tensor],
fix_preds: Tuple[Tensor],
ratio_preds: Tuple[Tensor],
batch_img_metas: List[dict],
rcnn_test_cfg: Optional[ConfigDict] = None,
rescale: bool = False) -> InstanceList:
"""Transform network output for a batch into bbox predictions.
Args:
rois (tuple[Tensor]): Tuple of boxes to be transformed.
Each has shape (num_boxes, 5), the last dimension 5 arranged as
(batch_index, x1, y1, x2, y2).
cls_scores (tuple[Tensor]): Tuple of box scores, each has shape
(num_boxes, num_classes + 1).
bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each
has shape (num_boxes, num_classes * 4).
fix_preds (tuple[Tensor]): Tuple of fix / deltas, each
has shape (num_boxes, num_classes * 4).
ratio_preds (tuple[Tensor]): Tuple of ratio / deltas, each
has shape (num_boxes, num_classes * 1).
batch_img_metas (list[dict]): List of image information.
rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Instance segmentation
results of each image after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 8),
the last dimension 8 arranged as (x1, y1, ..., x4, y4).
"""
assert rois.ndim == 3, 'Only support exporting two-stage ' \
    'models to ONNX ' \
    'with batch dimension.'
ctx = FUNCTION_REWRITER.get_context()
img_shape = batch_img_metas[0]['img_shape']
if self.custom_cls_channels:
scores = self.loss_cls.get_activation(cls_scores)
else:
scores = F.softmax(
cls_scores, dim=-1) if cls_scores is not None else None
assert bbox_preds is not None
bboxes = self.bbox_coder.decode(
rois[..., 1:], bbox_preds, max_shape=img_shape)
qboxes = self.fix_coder.decode(bboxes, fix_preds)
bboxes = bboxes.view(*ratio_preds.size(), 4)
qboxes = qboxes.view(*ratio_preds.size(), 8)
from mmrotate.structures.bbox import hbox2qbox
qboxes = qboxes.where(
ratio_preds.unsqueeze(-1) < self.ratio_thr, hbox2qbox(bboxes))
qboxes = qboxes.squeeze(2)
bboxes = QuadriBoxes(qboxes)
if self.predict_box_type == 'rbox':
bboxes = bboxes.detach().convert_to('rbox')
bboxes = get_box_tensor(bboxes)
# ignore background class
scores = scores[..., :self.num_classes]
post_params = get_post_processing_params(ctx.cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = rcnn_test_cfg.nms.get('iou_threshold',
post_params.iou_threshold)
score_threshold = rcnn_test_cfg.get('score_thr',
post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = rcnn_test_cfg.get('max_per_img', post_params.keep_top_k)
nms_type = rcnn_test_cfg.nms.get('type')
return multiclass_nms(
bboxes,
scores,
max_output_boxes_per_class,
nms_type=nms_type,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k) | Transform network output for a batch into bbox predictions. Args: rois (tuple[Tensor]): Tuple of boxes to be transformed. Each has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_scores (tuple[Tensor]): Tuple of box scores, each has shape (num_boxes, num_classes + 1). bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each has shape (num_boxes, num_classes * 4). fix_preds (tuple[Tensor]): Tuple of fix / deltas, each has shape (num_boxes, num_classes * 4). ratio_preds (tuple[Tensor]): Tuple of ratio / deltas, each has shape (num_boxes, num_classes * 1). batch_img_metas (list[dict]): List of image information. rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[:obj:`InstanceData`]: Instance segmentation results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 8), the last dimension 4 arrange as (x1, y1, ..., x4, y4). |
188,771 | from typing import List, Tuple
import torch
from mmdet.utils import ConfigType, InstanceList
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `gv_ratio_roi_head__predict_bbox` function. Write a Python function `def gv_ratio_roi_head__predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: InstanceList, rcnn_test_cfg: ConfigType, rescale: bool = False)` to solve the following problem:
Test only det bboxes without augmentation. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[Tensor]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - dets (Tensor): Classification bboxes and scores, has a shape (num_instance, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
Here is the function:
def gv_ratio_roi_head__predict_bbox(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
rpn_results_list: InstanceList,
rcnn_test_cfg: ConfigType,
rescale: bool = False):
"""Test only det bboxes without augmentation.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[Tensor]): List of region
proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[Tensor]: Detection results of each image
after the post process.
Each item usually contains following keys.
- dets (Tensor): Classification bboxes and scores, has a shape
(num_instance, 5)
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
rois = rpn_results_list[0]
batch_index = torch.arange(
rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand(
rois.size(0), rois.size(1), 1)
rois = torch.cat([batch_index, rois[..., :4]], dim=-1)
batch_size = rois.shape[0]
num_proposals_per_img = rois.shape[1]
# Eliminate the batch dimension
rois = rois.view(-1, 5)
bbox_results = self._bbox_forward(x, rois)
cls_scores = bbox_results['cls_score']
bbox_preds = bbox_results['bbox_pred']
fix_preds = bbox_results['fix_pred']
ratio_preds = bbox_results['ratio_pred']
# Recover the batch dimension
rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))
cls_scores = cls_scores.reshape(batch_size, num_proposals_per_img,
cls_scores.size(-1))
bbox_preds = bbox_preds.reshape(batch_size, num_proposals_per_img,
bbox_preds.size(-1))
fix_preds = fix_preds.reshape(batch_size, num_proposals_per_img,
fix_preds.size(-1))
ratio_preds = ratio_preds.reshape(batch_size, num_proposals_per_img,
ratio_preds.size(-1))
result_list = self.bbox_head.predict_by_feat(
rois=rois,
cls_scores=cls_scores,
bbox_preds=bbox_preds,
fix_preds=fix_preds,
ratio_preds=ratio_preds,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=rcnn_test_cfg,
rescale=rescale)
return result_list | Test only det bboxes without augmentation. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[Tensor]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. rescale (bool): If True, return boxes in original image space. Defaults to False. Returns: list[Tensor]: Detection results of each image after the post process. Each item usually contains following keys. - dets (Tensor): Classification bboxes and scores, has a shape (num_instance, 5) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). |
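The batch-index bookkeeping at the top of the function is a common trick for flattening batched proposals into the (N * num_proposals, 5) layout that RoI ops expect, with the image index in the first column. A standalone sketch:
import torch

rois = torch.randn(2, 3, 4)  # (batch, num_proposals, 4)
batch_index = torch.arange(
    rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand(
        rois.size(0), rois.size(1), 1)
rois5 = torch.cat([batch_index, rois], dim=-1)  # (2, 3, 5)
flat = rois5.view(-1, 5)                        # row i carries its image index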
188,772 | import os.path as osp
from copy import deepcopy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import mmcv
import numpy as np
import torch
from mmengine import Config
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import Codebase, Task, get_root_logger
from mmdeploy.utils.config_utils import get_input_shape
The provided code snippet includes necessary dependencies for implementing the `process_model_config` function. Write a Python function `def process_model_config(model_cfg: Config, imgs: Union[str, np.ndarray], input_shape: Optional[Sequence[int]] = None)` to solve the following problem:
Process the model config. Args: model_cfg (Config): The model config. imgs (str | np.ndarray): Input image(s), accepted data type are `str`, `np.ndarray`. input_shape (list[int]): A list of two integer in (width, height) format specifying input shape. Default: None. Returns: Config: the model config after processing.
Here is the function:
def process_model_config(model_cfg: Config,
imgs: Union[str, np.ndarray],
input_shape: Optional[Sequence[int]] = None):
"""Process the model config.
Args:
model_cfg (Config): The model config.
imgs (str | np.ndarray): Input image(s), accepted data type are `str`,
`np.ndarray`.
input_shape (list[int]): A list of two integers in (width, height)
format specifying input shape. Default: None.
Returns:
Config: the model config after processing.
"""
cfg = deepcopy(model_cfg)
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
if isinstance(imgs[0], str):
if cfg.test_pipeline[0]['type'] != 'LoadImageFromFile':
cfg.test_pipeline.insert(0, dict(type='LoadImageFromFile'))
else:
if cfg.test_pipeline[0]['type'] == 'LoadImageFromFile':
cfg.test_pipeline.pop(0)
# check whether input_shape is valid
if input_shape is not None:
for pipeline_component in cfg.test_pipeline:
if 'Crop' in pipeline_component['type']:
if 'crop_size' in pipeline_component:
crop_size = pipeline_component['crop_size']
if tuple(input_shape) != (crop_size, crop_size):
logger = get_root_logger()
logger.warning(
f'`input shape` should be equal to `crop_size`: '
f'{crop_size}, but given: {input_shape}')
return cfg | Process the model config. Args: model_cfg (Config): The model config. imgs (str | np.ndarray): Input image(s), accepted data type are `str`, `np.ndarray`. input_shape (list[int]): A list of two integer in (width, height) format specifying input shape. Default: None. Returns: Config: the model config after processing. |
188,773 | import os.path as osp
from copy import deepcopy
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import mmcv
import numpy as np
import torch
from mmengine import Config
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import Codebase, Task, get_root_logger
from mmdeploy.utils.config_utils import get_input_shape
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_metainfo` function. Write a Python function `def _get_dataset_metainfo(model_cfg: Config)` to solve the following problem:
Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: list[str]: A list of string specifying names of different class.
Here is the function:
def _get_dataset_metainfo(model_cfg: Config):
"""Get metainfo of dataset.
Args:
model_cfg (Config): Input model Config object.
Returns:
list[str]: A list of string specifying names of different class.
"""
from mmpretrain import datasets # noqa
from mmpretrain.registry import DATASETS
module_dict = DATASETS.module_dict
for dataloader_name in [
'test_dataloader', 'val_dataloader', 'train_dataloader'
]:
if dataloader_name not in model_cfg:
continue
dataloader_cfg = model_cfg[dataloader_name]
dataset_cfg = dataloader_cfg.dataset
dataset_cls = module_dict.get(dataset_cfg.type, None)
if dataset_cls is None:
continue
if hasattr(dataset_cls, '_load_metainfo') and isinstance(
dataset_cls._load_metainfo, Callable):
meta = dataset_cls._load_metainfo(
dataset_cfg.get('metainfo', None))
if meta is not None:
return meta
if hasattr(dataset_cls, 'METAINFO'):
return dataset_cls.METAINFO
return None | Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: list[str]: A list of string specifying names of different class. |
188,774 | from typing import Any, List, Optional, Sequence, Union
import numpy as np
import torch
from mmengine import Config
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmengine.structures import BaseDataElement
from torch import nn
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
get_root_logger, load_config)
__BACKEND_MODEL = Registry('backend_classifiers')
The provided code snippet includes necessary dependencies for implementing the `build_classification_model` function. Write a Python function `def build_classification_model( model_files: Sequence[str], model_cfg: Union[str, Config], deploy_cfg: Union[str, Config], device: str, data_preprocessor: Optional[Union[Config, BaseDataPreprocessor]] = None, **kwargs)` to solve the following problem:
Build classification model for different backend. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model. data_preprocessor (BaseDataPreprocessor): The data preprocessor of the model. Default to `None`. **kwargs: Other key-pair arguments. Returns: BaseBackendModel: Classifier for a configured backend.
Here is the function:
def build_classification_model(
model_files: Sequence[str],
model_cfg: Union[str, Config],
deploy_cfg: Union[str, Config],
device: str,
data_preprocessor: Optional[Union[Config,
BaseDataPreprocessor]] = None,
**kwargs):
"""Build classification model for different backend.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | Config): Input model config file or Config
object.
deploy_cfg (str | Config): Input deployment config file or
Config object.
device (str): Device to input model.
data_preprocessor (BaseDataPreprocessor): The data preprocessor
of the model. Default to `None`.
**kwargs: Other key-pair arguments.
Returns:
BaseBackendModel: Classifier for a configured backend.
"""
# load cfg if necessary
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
backend = get_backend(deploy_cfg)
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
backend_classifier = __BACKEND_MODEL.build(
dict(
type=model_type,
backend=backend,
backend_files=model_files,
device=device,
model_cfg=model_cfg,
deploy_cfg=deploy_cfg,
data_preprocessor=data_preprocessor,
**kwargs))
return backend_classifier | Build classification model for different backend. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | Config): Input model config file or Config object. deploy_cfg (str | Config): Input deployment config file or Config object. device (str): Device to input model. data_preprocessor (BaseDataPreprocessor): The data preprocessor of the model. Default to `None`. **kwargs: Other key-pair arguments. Returns: BaseBackendModel: Classifier for a configured backend. |
188,775 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import Backend
The provided code snippet includes necessary dependencies for implementing the `gap__forward` function. Write a Python function `def gap__forward(self, inputs)` to solve the following problem:
Rewrite `forward` of GlobalAveragePooling for default backend. Replace `view` with `flatten` to export a simple onnx graph: the Shape->Gather->Unsqueeze->Concat->Reshape chain becomes a single Flatten.
Here is the function:
def gap__forward(self, inputs):
"""Rewrite `forward` of GlobalAveragePooling for default backend.
    Replace `view` with `flatten` to export a simple onnx graph: the
    Shape->Gather->Unsqueeze->Concat->Reshape chain becomes a single Flatten.
"""
if isinstance(inputs, tuple):
outs = tuple([self.gap(x) for x in inputs])
outs = tuple([out.flatten(1) for out in outs])
elif isinstance(inputs, torch.Tensor):
outs = self.gap(inputs)
outs = outs.flatten(1)
else:
raise TypeError('neck inputs should be tuple or torch.tensor')
    return outs | Rewrite `forward` of GlobalAveragePooling for default backend. Replace `view` with `flatten` to export a simple onnx graph: the Shape->Gather->Unsqueeze->Concat->Reshape chain becomes a single Flatten. |
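To see what the rewrite buys, a small standalone equivalence check (not part of the original row): `flatten(1)` matches `view(batch, -1)` numerically, but traces to a single ONNX Flatten op instead of a Shape->Gather->Unsqueeze->Concat->Reshape subgraph.
import torch

x = torch.randn(2, 64, 1, 1)  # typical GAP output of shape (N, C, 1, 1)
assert torch.equal(x.flatten(1), x.view(x.size(0), -1))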
188,776 | import torch
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `shufflenetv2_backbone__forward__default` function. Write a Python function `def shufflenetv2_backbone__forward__default(self, x)` to solve the following problem:
Rewrite `forward` of InvertedResidual used in shufflenet_v2. The chunk in original InvertedResidual.forward will convert to dynamic `Slice` operator in ONNX, which will raise error in ncnn, torchscript and tensorrt. Here we replace `chunk` with `split`. Args: ctx (ContextCaller): The context with additional information. self (InvertedResidual): The instance of the class InvertedResidual. x (Tensor): Input features of shape (N, Cin, H, W). Returns: out (Tensor): A feature map output from InvertedResidual. The tensor shape (N, Cout, H, W).
Here is the function:
def shufflenetv2_backbone__forward__default(self, x):
"""Rewrite `forward` of InvertedResidual used in shufflenet_v2.
The chunk in original InvertedResidual.forward will convert to dynamic
`Slice` operator in ONNX, which will raise error in ncnn, torchscript
and tensorrt. Here we replace `chunk` with `split`.
Args:
ctx (ContextCaller): The context with additional information.
self (InvertedResidual): The instance of the class InvertedResidual.
x (Tensor): Input features of shape (N, Cin, H, W).
Returns:
out (Tensor): A feature map output from InvertedResidual. The tensor
shape (N, Cout, H, W).
"""
from mmpretrain.models.utils import channel_shuffle
if self.stride > 1:
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
else:
assert x.shape[1] % 2 == 0
x1, x2 = torch.split(x, x.shape[1] // 2, dim=1)
out = torch.cat((x1, self.branch2(x2)), dim=1)
out = channel_shuffle(out, 2)
return out | Rewrite `forward` of InvertedResidual used in shufflenet_v2. The chunk in original InvertedResidual.forward will convert to dynamic `Slice` operator in ONNX, which will raise error in ncnn, torchscript and tensorrt. Here we replace `chunk` with `split`. Args: ctx (ContextCaller): The context with additional information. self (InvertedResidual): The instance of the class InvertedResidual. x (Tensor): Input features of shape (N, Cin, H, W). Returns: out (Tensor): A feature map output from InvertedResidual. The tensor shape (N, Cout, H, W). |
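A standalone check of the chunk/split equivalence this rewrite relies on: with an even channel count the outputs are identical, but `split` with an explicit size exports as a static ONNX Split rather than a dynamic Slice.
import torch

x = torch.randn(1, 8, 4, 4)
a, b = x.chunk(2, dim=1)
c, d = torch.split(x, x.shape[1] // 2, dim=1)
assert torch.equal(a, c) and torch.equal(b, d)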
188,777 | from typing import List, Optional
import torch
import torch.nn.functional as F
from mmengine.structures import BaseDataElement
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `base_classifier__forward` function. Write a Python function `def base_classifier__forward( self, batch_inputs: Tensor, data_samples: Optional[List[BaseDataElement]] = None, mode: str = 'predict')` to solve the following problem:
Rewrite `forward` of BaseClassifier for default backend. Args: batch_inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[BaseDataElement], optional): The annotation data of every sample. It's required if ``mode="loss"``. Defaults to None. mode (str): Return what kind of value. Defaults to 'predict'. Returns: torch.Tensor: The post-processed classification scores.
Here is the function:
def base_classifier__forward(
self,
batch_inputs: Tensor,
data_samples: Optional[List[BaseDataElement]] = None,
mode: str = 'predict'):
"""Rewrite `forward` of BaseClassifier for default backend.
Args:
batch_inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[BaseDataElement], optional): The annotation
            data of every sample. It's required if ``mode="loss"``.
            Defaults to None.
        mode (str): Return what kind of value. Defaults to 'predict'.
    Returns:
        torch.Tensor: The post-processed classification scores.
"""
output = self.extract_feat(batch_inputs)
if self.head is not None:
output = self.head(output)
from mmpretrain.models.heads import ConformerHead, MultiLabelClsHead
if isinstance(self.head, MultiLabelClsHead):
output = torch.sigmoid(output)
elif isinstance(self.head, ConformerHead):
output = F.softmax(torch.add(output[0], output[1]), dim=1)
else:
output = F.softmax(output, dim=1)
    return output | Rewrite `forward` of BaseClassifier for default backend. Args: batch_inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[BaseDataElement], optional): The annotation data of every sample. It's required if ``mode="loss"``. Defaults to None. mode (str): Return what kind of value. Defaults to 'predict'. Returns: torch.Tensor: The post-processed classification scores. |
188,778 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.mmcv.cnn import MultiHeadAttentionop
from mmdeploy.utils import Backend, get_dynamic_axes
The provided code snippet includes necessary dependencies for implementing the `multiheadattention__forward__ncnn` function. Write a Python function `def multiheadattention__forward__ncnn(self, qkv_input)` to solve the following problem:
Rewrite `forward` of MultiheadAttention used in vision_transformer for ncnn backend. Args: ctx (ContextCaller): The context with additional information. self (MultiheadAttention): The instance of the class MultiheadAttention. qkv_input (Tensor): Input features of shape (N, Cin, H, W). Returns: out (Tensor): A feature map output from MultiHeadAttention. The tensor shape (N, Cout, H, W).
Here is the function:
def multiheadattention__forward__ncnn(self, qkv_input):
"""Rewrite `forward` of MultiheadAttention used in vision_transformer for
ncnn backend.
Args:
ctx (ContextCaller): The context with additional information.
self (MultiheadAttention): The instance of the class
MultiheadAttention.
qkv_input (Tensor): Input features of shape (N, Cin, H, W).
Returns:
out (Tensor): A feature map output from MultiHeadAttention. The tensor
shape (N, Cout, H, W).
"""
# split qkv weight and bias
qkv_weight = self.qkv.weight.data.reshape(3, self.input_dims,
self.embed_dims)
q_weight = qkv_weight[0]
k_weight = qkv_weight[1]
v_weight = qkv_weight[2]
qkv_bias = self.qkv.bias.data.reshape(3, self.embed_dims)
q_bias = qkv_bias[0]
k_bias = qkv_bias[1]
v_bias = qkv_bias[2]
# out weight and bias
o_weight = self.proj.weight.data
o_bias = self.proj.bias.data
out = MultiHeadAttentionop.apply(qkv_input, qkv_input, qkv_input, q_weight,
q_bias, k_weight, k_bias, v_weight,
v_bias, o_weight, o_bias, self.embed_dims,
self.num_heads)
return out | Rewrite `forward` of MultiheadAttention used in vision_transformer for ncnn backend. Args: ctx (ContextCaller): The context with additional information. self (MultiheadAttention): The instance of the class MultiheadAttention. qkv_input (Tensor): Input features of shape (N, Cin, H, W). Returns: out (Tensor): A feature map output from MultiHeadAttention. The tensor shape (N, Cout, H, W). |
188,779 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.mmcv.cnn import MultiHeadAttentionop
from mmdeploy.utils import Backend, get_dynamic_axes
The provided code snippet includes necessary dependencies for implementing the `shift_window_msa__forward__default` function. Write a Python function `def shift_window_msa__forward__default(self, query, hw_shape)` to solve the following problem:
Rewrite forward function of ShiftWindowMSA class for TensorRT. 1. replace dynamic padding with static padding and dynamic slice. 2. always do slice `x = x[:, :H, :W, :].contiguous()` for stability.
Here is the function:
def shift_window_msa__forward__default(self, query, hw_shape):
"""Rewrite forward function of ShiftWindowMSA class for TensorRT.
1. replace dynamic padding with static padding and dynamic slice.
2. always do slice `x = x[:, :H, :W, :].contiguous()` for stability.
"""
ctx = FUNCTION_REWRITER.get_context()
if get_dynamic_axes(ctx.cfg) is None:
# avoid the weird bug of torch to onnx
return ctx.origin_func(self, query, hw_shape)
B, L, C = query.shape
H, W = hw_shape
assert L == H * W, f"The query length {L} doesn't match the input "\
f'shape ({H}, {W}).'
query = query.view(B, H, W, C)
window_size = self.window_size
shift_size = self.shift_size
if min(H, W) == window_size:
# If not pad small feature map, avoid shifting when the window size
# is equal to the size of feature map. It's to align with the
# behavior of the original implementation.
shift_size = shift_size if self.pad_small_map else 0
elif min(H, W) < window_size:
# In the original implementation, the window size will be shrunk
# to the size of feature map. The behavior is different with
# swin-transformer for downstream tasks. To support dynamic input
# shape, we don't allow this feature.
assert self.pad_small_map, \
f'The input shape ({H}, {W}) is smaller than the window ' \
f'size ({window_size}). Please set `pad_small_map=True`, or ' \
'decrease the `window_size`.'
# pad feature maps to multiples of window size
query = query.permute(0, 3, 1, 2).contiguous()
# query = torch.nn.ZeroPad2d([0, self.window_size, 0, self.window_size])(
# query)
query = torch.cat([query, query.new_zeros(B, C, H, window_size)], dim=-1)
query = torch.cat(
[query, query.new_zeros(B, C, window_size, query.shape[-1])], dim=-2)
slice_h = (H + window_size - 1) // window_size * window_size
slice_w = (W + window_size - 1) // window_size * window_size
query = query[:, :, :slice_h, :slice_w]
query = query.permute(0, 2, 3, 1).contiguous()
H_pad, W_pad = query.shape[1], query.shape[2]
# cyclic shift
if shift_size > 0:
query = torch.roll(
query, shifts=(-shift_size, -shift_size), dims=(1, 2))
attn_mask = self.get_attn_mask((H_pad, W_pad),
window_size=window_size,
shift_size=shift_size,
device=query.device)
# nW*B, window_size, window_size, C
query_windows = self.window_partition(query, window_size)
# nW*B, window_size*window_size, C
query_windows = query_windows.view(-1, window_size**2, C)
# W-MSA/SW-MSA (nW*B, window_size*window_size, C)
attn_windows = self.w_msa(query_windows, mask=attn_mask)
# merge windows
attn_windows = attn_windows.view(-1, window_size, window_size, C)
# B H' W' C
shifted_x = self.window_reverse(attn_windows, H_pad, W_pad, window_size)
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(shift_size, shift_size), dims=(1, 2))
else:
x = shifted_x
if H != H_pad or W != W_pad:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
x = self.drop(x)
return x | Rewrite forward function of ShiftWindowMSA class for TensorRT. 1. replace dynamic padding with static padding and dynamic slice. 2. always do slice `x = x[:, :H, :W, :].contiguous()` for stability. |
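The core trick above is to pad by a full window (a static amount) and then slice back to the next multiple of `window_size` (a data-dependent bound). A quick check of the ceil-to-multiple arithmetic:
H, window_size = 13, 7
slice_h = (H + window_size - 1) // window_size * window_size  # ceil(H / ws) * ws
assert slice_h == 14
assert H <= slice_h <= H + window_size  # one extra window of padding always suffices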
188,780 | import torch
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.core.rewriters.rewriter_utils import LibVersionChecker
from mmdeploy.mmcv.cnn import MultiHeadAttentionop
from mmdeploy.utils import Backend, get_dynamic_axes
The provided code snippet includes necessary dependencies for implementing the `shift_window_msa__get_attn_mask__default` function. Write a Python function `def shift_window_msa__get_attn_mask__default(self, hw_shape, window_size, shift_size, device=None)` to solve the following problem:
Rewrite get_attn_mask function of ShiftWindowMSA class. Replace the loop of setitem with a simpler logic.
Here is the function:
def shift_window_msa__get_attn_mask__default(self,
hw_shape,
window_size,
shift_size,
device=None):
"""Rewrite get_attn_mask function of ShiftWindowMSA class.
Replace the loop of setitem with a simpler logic.
"""
if shift_size > 0:
# calculate attention mask for SW-MSA
w_mask = torch.cat([
torch.zeros((hw_shape[1] - window_size),
dtype=torch.int64,
device=device),
torch.full((window_size - shift_size, ), 1, device=device),
torch.full((shift_size, ), 2, device=device)
])
h_mask = torch.cat([
torch.zeros((hw_shape[0] - window_size),
dtype=torch.int64,
device=device),
torch.full((window_size - shift_size, ), 3, device=device),
torch.full((shift_size, ), 6, device=device)
])
img_mask = w_mask.unsqueeze(0) + h_mask.unsqueeze(1)
img_mask = img_mask.unsqueeze(0)
img_mask = img_mask.unsqueeze(-1)
# nW, window_size, window_size, 1
from mmpretrain.models.utils import ShiftWindowMSA
mask_windows = ShiftWindowMSA.window_partition(img_mask, window_size)
mask_windows = mask_windows.view(-1, window_size * window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0)
attn_mask = attn_mask.masked_fill(attn_mask == 0, 0.0)
else:
attn_mask = None
return attn_mask | Rewrite get_attn_mask function of ShiftWindowMSA class. Replace the loop of setitem with a simpler logic. |
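The rewrite labels each axis with three region ids (main body, shifted-window body, shift remainder) and sums the two axes to obtain the nine SW-MSA zones without a Python setitem loop. A standalone sanity check of the one-axis encoding, using small illustrative sizes:
import torch

W, window_size, shift_size = 8, 4, 2
w_mask = torch.cat([
    torch.zeros(W - window_size, dtype=torch.int64),
    torch.full((window_size - shift_size, ), 1, dtype=torch.int64),
    torch.full((shift_size, ), 2, dtype=torch.int64),
])
assert w_mask.tolist() == [0, 0, 0, 0, 1, 1, 2, 2]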
188,781 | from itertools import zip_longest
from typing import List, Optional, Sequence, Union
import mmengine
import torch
import torch.nn as nn
from mmengine import Config
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmengine.structures import BaseDataElement, InstanceData
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
load_config)
__BACKEND_MODEL = Registry('backend_segmentors')
The provided code snippet includes necessary dependencies for implementing the `build_pose_detection_model` function. Write a Python function `def build_pose_detection_model( model_files: Sequence[str], model_cfg: Union[str, mmengine.Config], deploy_cfg: Union[str, mmengine.Config], device: str, data_preprocessor: Optional[Union[Config, BaseDataPreprocessor]] = None, **kwargs)` to solve the following problem:
Build pose detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | mmengine.Config): Input model config file or Config object. deploy_cfg (str | mmengine.Config): Input deployment config file or Config object. device (str): Device to input model. data_preprocessor (Config | BaseDataPreprocessor | None): Input data pre-processor. Default is ``None``. Returns: BaseBackendModel: Pose model for a configured backend.
Here is the function:
def build_pose_detection_model(
model_files: Sequence[str],
model_cfg: Union[str, mmengine.Config],
deploy_cfg: Union[str, mmengine.Config],
device: str,
data_preprocessor: Optional[Union[Config,
BaseDataPreprocessor]] = None,
**kwargs):
"""Build object segmentation model for different backends.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | mmengine.Config): Input model config file or Config
object.
deploy_cfg (str | mmengine.Config): Input deployment config file or
Config object.
device (str): Device to input model.
data_preprocessor (Config | BaseDataPreprocessor | None): Input data
pre-processor. Default is ``None``.
Returns:
BaseBackendModel: Pose model for a configured backend.
"""
from mmpose.models.data_preprocessors import PoseDataPreprocessor
# load cfg if necessary
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
backend = get_backend(deploy_cfg)
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
if isinstance(data_preprocessor, dict):
dp = data_preprocessor.copy()
dp_type = dp.pop('type')
if dp_type == 'mmdet.DetDataPreprocessor':
from mmdet.models.data_preprocessors import DetDataPreprocessor
data_preprocessor = DetDataPreprocessor(**dp)
else:
assert dp_type == 'PoseDataPreprocessor'
data_preprocessor = PoseDataPreprocessor(**dp)
backend_pose_model = __BACKEND_MODEL.build(
dict(
type=model_type,
backend=backend,
backend_files=model_files,
device=device,
deploy_cfg=deploy_cfg,
model_cfg=model_cfg,
data_preprocessor=data_preprocessor,
**kwargs))
    return backend_pose_model | Build pose detection model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | mmengine.Config): Input model config file or Config object. deploy_cfg (str | mmengine.Config): Input deployment config file or Config object. device (str): Device to input model. data_preprocessor (Config | BaseDataPreprocessor | None): Input data pre-processor. Default is ``None``. Returns: BaseBackendModel: Pose model for a configured backend. |
188,782 | import copy
import os
from collections import defaultdict
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import mmcv
import mmengine
import numpy as np
import torch
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import (Codebase, Task, get_codebase_config,
get_input_shape, get_root_logger)
The provided code snippet includes necessary dependencies for implementing the `process_model_config` function. Write a Python function `def process_model_config( model_cfg: mmengine.Config, imgs: Union[Sequence[str], Sequence[np.ndarray]], input_shape: Optional[Sequence[int]] = None, )` to solve the following problem:
Process the model config for the SDK model. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s); accepted data types are List[str] and List[np.ndarray]. input_shape (list[int]): A list of two integers in (width, height) format specifying input shape. Default: None. Returns: mmengine.Config: the model config after processing.
Here is the function:
def process_model_config(
model_cfg: mmengine.Config,
imgs: Union[Sequence[str], Sequence[np.ndarray]],
input_shape: Optional[Sequence[int]] = None,
):
"""Process the model config for sdk model.
Args:
model_cfg (mmengine.Config): The model config.
imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted
data type are List[str], List[np.ndarray].
input_shape (list[int]): A list of two integer in (width, height)
format specifying input shape. Default: None.
Returns:
mmengine.Config: the model config after processing.
"""
cfg = copy.deepcopy(model_cfg)
test_pipeline = cfg.test_dataloader.dataset.pipeline
data_preprocessor = cfg.model.data_preprocessor
codec = cfg.codec
if isinstance(codec, list):
codec = codec[-1]
input_size = codec['input_size'] if input_shape is None else input_shape
test_pipeline[0] = dict(type='LoadImageFromFile')
for i in reversed(range(len(test_pipeline))):
trans = test_pipeline[i]
if trans['type'] == 'PackPoseInputs':
test_pipeline.pop(i)
elif trans['type'] == 'GetBBoxCenterScale':
trans['type'] = 'TopDownGetBboxCenterScale'
trans['padding'] = 1.25 # default argument
trans['image_size'] = input_size
elif trans['type'] == 'TopdownAffine':
trans['type'] = 'TopDownAffine'
trans['image_size'] = input_size
trans.pop('input_size')
test_pipeline.append(
dict(
type='Normalize',
mean=data_preprocessor.mean,
std=data_preprocessor.std,
to_rgb=data_preprocessor.get('bgr_to_rgb', False)))
test_pipeline.append(dict(type='ImageToTensor', keys=['img']))
test_pipeline.append(
dict(
type='Collect',
keys=['img'],
meta_keys=[
'img_shape', 'pad_shape', 'ori_shape', 'img_norm_cfg',
'scale_factor', 'bbox_score', 'center', 'scale'
]))
cfg.test_dataloader.dataset.pipeline = test_pipeline
    return cfg | Process the model config for the SDK model. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s); accepted data types are List[str] and List[np.ndarray]. input_shape (list[int]): A list of two integers in (width, height) format specifying input shape. Default: None. Returns: mmengine.Config: the model config after processing. |
188,783 | import copy
import os
from collections import defaultdict
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import mmcv
import mmengine
import numpy as np
import torch
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
from mmdeploy.utils import (Codebase, Task, get_codebase_config,
get_input_shape, get_root_logger)
The provided code snippet includes necessary dependencies for implementing the `_get_dataset_metainfo` function. Write a Python function `def _get_dataset_metainfo(model_cfg: mmengine.Config)` to solve the following problem:
Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: (list[str], list[np.ndarray]): Class names and palette
Here is the function:
def _get_dataset_metainfo(model_cfg: mmengine.Config):
"""Get metainfo of dataset.
Args:
model_cfg Config: Input model Config object.
Returns:
(list[str], list[np.ndarray]): Class names and palette
"""
from mmpose import datasets # noqa
from mmpose.registry import DATASETS
module_dict = DATASETS.module_dict
for dataloader_name in [
'test_dataloader', 'val_dataloader', 'train_dataloader'
]:
if dataloader_name not in model_cfg:
continue
dataloader_cfg = model_cfg[dataloader_name]
dataset_cfg = dataloader_cfg.dataset
dataset_mmpose = module_dict.get(dataset_cfg.type, None)
if dataset_mmpose is None:
continue
if hasattr(dataset_mmpose, '_load_metainfo') and isinstance(
dataset_mmpose._load_metainfo, Callable):
meta = dataset_mmpose._load_metainfo(
dataset_cfg.get('metainfo', None))
if meta is not None:
return meta
if hasattr(dataset_mmpose, 'METAINFO'):
return dataset_mmpose.METAINFO
return None | Get metainfo of dataset. Args: model_cfg Config: Input model Config object. Returns: (list[str], list[np.ndarray]): Class names and palette |
188,784 | from typing import Tuple
import torch
The provided code snippet includes necessary dependencies for implementing the `get_simcc_maximum` function. Write a Python function `def get_simcc_maximum(simcc_x: torch.Tensor, simcc_y: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]` to solve the following problem:
Get maximum response location and value from simcc representations. Rewritten to support `torch.Tensor` input type. Args: simcc_x (torch.Tensor): x-axis SimCC in shape (N, K, Wx) simcc_y (torch.Tensor): y-axis SimCC in shape (N, K, Wy) Returns: tuple: - locs (torch.Tensor): locations of maximum heatmap responses in shape (N, K, 2) - vals (torch.Tensor): values of maximum heatmap responses in shape (N, K)
Here is the function:
def get_simcc_maximum(
        simcc_x: torch.Tensor,
        simcc_y: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Get maximum response location and value from simcc representations.
    Rewritten to support `torch.Tensor` input type.
Args:
simcc_x (torch.Tensor): x-axis SimCC in shape (N, K, Wx)
simcc_y (torch.Tensor): y-axis SimCC in shape (N, K, Wy)
Returns:
tuple:
- locs (torch.Tensor): locations of maximum heatmap responses in shape
(N, K, 2)
- vals (torch.Tensor): values of maximum heatmap responses in shape
(N, K)
"""
N, K, _ = simcc_x.shape
simcc_x = simcc_x.flatten(0, 1)
simcc_y = simcc_y.flatten(0, 1)
x_locs = simcc_x.argmax(dim=1, keepdim=True)
y_locs = simcc_y.argmax(dim=1, keepdim=True)
locs = torch.cat((x_locs, y_locs), dim=1).to(torch.float32)
max_val_x, _ = simcc_x.max(dim=1, keepdim=True)
max_val_y, _ = simcc_y.max(dim=1, keepdim=True)
vals, _ = torch.cat([max_val_x, max_val_y], dim=1).min(dim=1)
locs = locs.reshape(N, K, 2)
vals = vals.reshape(N, K)
    return locs, vals | Get maximum response location and value from simcc representations. Rewritten to support `torch.Tensor` input type. Args: simcc_x (torch.Tensor): x-axis SimCC in shape (N, K, Wx) simcc_y (torch.Tensor): y-axis SimCC in shape (N, K, Wy) Returns: tuple: - locs (torch.Tensor): locations of maximum heatmap responses in shape (N, K, 2) - vals (torch.Tensor): values of maximum heatmap responses in shape (N, K) |
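A shape-level sanity check of the decoder above, using random SimCC tensors with illustrative sizes:
import torch

simcc_x = torch.rand(2, 17, 256)  # N=2 instances, K=17 keypoints, Wx=256 bins
simcc_y = torch.rand(2, 17, 256)
locs, vals = get_simcc_maximum(simcc_x, simcc_y)
assert locs.shape == (2, 17, 2) and vals.shape == (2, 17)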
188,785 | from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `mspn_head__forward` function. Write a Python function `def mspn_head__forward(self, feats)` to solve the following problem:
Rewrite `forward` of MSPNHead and CPMHead for default backend. 1. return last stage heatmaps directly. Args: feats (tuple[Tensor]): Input features. Returns: output_heatmap (torch.Tensor): Output heatmaps.
Here is the function:
def mspn_head__forward(self, feats):
"""Rewrite `forward` of MSPNHead and CPMHead for default backend.
1. return last stage heatmaps directly.
Args:
feats (tuple[Tensor]): Input features.
Returns:
output_heatmap (torch.Tensor): Output heatmaps.
"""
ctx = FUNCTION_REWRITER.get_context()
msmu_batch_heatmaps = ctx.origin_func(self, feats)
batch_heatmaps = msmu_batch_heatmaps[-1]
return batch_heatmaps | Rewrite `forward` of MSPNHead and CPMHead for default backend. 1. return last stage heatmaps directly. Args: feats (tuple[Tensor]): Input features. Returns: output_heatmap (torch.Tensor): Output heatmaps. |
188,786 | from typing import List, Optional, Tuple
import torch
from mmpose.structures.bbox import bbox_xyxy2cs
from torch import Tensor
from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops.nms import multiclass_nms
from mmdeploy.utils import Backend, get_backend
def multiclass_nms(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1,
output_index: bool = False,
nms_type='nms'):
"""Apis for multiclass nms."""
if nms_type == 'nms':
return _multiclass_nms(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k,
output_index=output_index)
elif nms_type == 'nms_rotated':
return multiclass_nms_rotated(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k)
elif nms_type == 'nms_match':
return multiclass_nms_match(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k)
else:
raise NotImplementedError(f'Unsupported nms type: {nms_type}.')
The provided code snippet includes necessary dependencies for implementing the `predict` function. Write a Python function `def predict(self, x: Tuple[Tensor], batch_data_samples: List = [], test_cfg: Optional[dict] = None)` to solve the following problem:
Get predictions and transform to bbox and keypoints results. Args: x (Tuple[Tensor]): The input tensor from upstream network. batch_data_samples: Batch image meta info. Defaults to None. test_cfg: The runtime config for testing process. Returns: Tuple[Tensor]: Predict bbox and keypoint results. - dets (Tensor): Predict bboxes and scores, which is a 3D Tensor, has shape (batch_size, num_instances, 5), the last dimension 5 arranged as (x1, y1, x2, y2, score). - pred_kpts (Tensor): Predict keypoints and scores, which is a 4D Tensor, has shape (batch_size, num_instances, num_keypoints, 3), the last dimension 3 arranged as (x, y, score).
Here is the function:
def predict(self,
x: Tuple[Tensor],
batch_data_samples: List = [],
test_cfg: Optional[dict] = None):
"""Get predictions and transform to bbox and keypoints results.
Args:
x (Tuple[Tensor]): The input tensor from upstream network.
batch_data_samples: Batch image meta info. Defaults to None.
test_cfg: The runtime config for testing process.
Returns:
Tuple[Tensor]: Predict bbox and keypoint results.
- dets (Tensor): Predict bboxes and scores, which is a 3D Tensor,
            has shape (batch_size, num_instances, 5), the last dimension 5
            arranged as (x1, y1, x2, y2, score).
        - pred_kpts (Tensor): Predict keypoints and scores, which is a 4D
            Tensor, has shape (batch_size, num_instances, num_keypoints, 3),
            the last dimension 3 arranged as (x, y, score).
"""
# deploy context
ctx = FUNCTION_REWRITER.get_context()
backend = get_backend(ctx.cfg)
deploy_cfg = ctx.cfg
cfg = self.test_cfg if test_cfg is None else test_cfg
# get predictions
cls_scores, bbox_preds, _, kpt_vis, pose_vecs = self.head_module(x)[:5]
assert len(cls_scores) == len(bbox_preds)
num_imgs = cls_scores[0].shape[0]
# flatten and concat predictions
scores = self._flatten_predictions(cls_scores).sigmoid()
flatten_bbox_preds = self._flatten_predictions(bbox_preds)
flatten_pose_vecs = self._flatten_predictions(pose_vecs)
flatten_kpt_vis = self._flatten_predictions(kpt_vis).sigmoid()
bboxes = self.decode_bbox(flatten_bbox_preds, self.flatten_priors,
self.flatten_stride)
if backend == Backend.TENSORRT:
# pad for batched_nms because its output index is filled with -1
bboxes = torch.cat(
[bboxes,
bboxes.new_zeros((bboxes.shape[0], 1, bboxes.shape[2]))],
dim=1)
scores = torch.cat(
[scores, scores.new_zeros((scores.shape[0], 1, 1))], dim=1)
# nms parameters
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.get('nms_thr', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.get('pre_top_k', -1)
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
# do nms
_, _, nms_indices = multiclass_nms(
bboxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k,
output_index=True)
batch_inds = torch.arange(num_imgs, device=scores.device).view(-1, 1)
# filter predictions
dets = torch.cat([bboxes, scores], dim=2)
dets = dets[batch_inds, nms_indices, ...]
pose_vecs = flatten_pose_vecs[batch_inds, nms_indices, ...]
kpt_vis = flatten_kpt_vis[batch_inds, nms_indices, ...]
grids = self.flatten_priors[nms_indices, ...]
# decode keypoints
bbox_cs = torch.cat(bbox_xyxy2cs(dets[..., :4], self.bbox_padding), dim=-1)
keypoints = self.dcc.forward_test(pose_vecs, bbox_cs, grids)
pred_kpts = torch.cat([keypoints, kpt_vis.unsqueeze(-1)], dim=-1)
    return dets, pred_kpts | Get predictions and transform to bbox and keypoints results. Args: x (Tuple[Tensor]): The input tensor from upstream network. batch_data_samples: Batch image meta info. Defaults to None. test_cfg: The runtime config for testing process. Returns: Tuple[Tensor]: Predict bbox and keypoint results. - dets (Tensor): Predict bboxes and scores, which is a 3D Tensor, has shape (batch_size, num_instances, 5), the last dimension 5 arranged as (x1, y1, x2, y2, score). - pred_kpts (Tensor): Predict keypoints and scores, which is a 4D Tensor, has shape (batch_size, num_instances, num_keypoints, 3), the last dimension 3 arranged as (x, y, score). |
188,787 | from mmdeploy.codebase.mmpose.codecs import get_simcc_maximum
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import get_codebase_config
The provided code snippet includes necessary dependencies for implementing the `simcc_head__forward` function. Write a Python function `def simcc_head__forward(self, feats)` to solve the following problem:
Rewrite `forward` of SimCCHead for default backend. Args: feats (tuple[Tensor]): Input features. Returns: key-points (torch.Tensor): Output keypoints in shape of (N, K, 3)
Here is the function:
def simcc_head__forward(self, feats):
"""Rewrite `forward` of SimCCHead for default backend.
Args:
feats (tuple[Tensor]): Input features.
Returns:
key-points (torch.Tensor): Output keypoints in
shape of (N, K, 3)
"""
ctx = FUNCTION_REWRITER.get_context()
simcc_x, simcc_y = ctx.origin_func(self, feats)
codebase_cfg = get_codebase_config(ctx.cfg)
export_postprocess = codebase_cfg.get('export_postprocess', False)
if not export_postprocess:
return simcc_x, simcc_y
assert self.decoder.use_dark is False, \
'Do not support SimCCLabel with use_dark=True'
pts, scores = get_simcc_maximum(simcc_x, simcc_y)
pts /= self.decoder.simcc_split_ratio
return pts, scores | Rewrite `forward` of SimCCHead for default backend. Args: feats (tuple[Tensor]): Input features. Returns: key-points (torch.Tensor): Output keypoints in shape of (N, K, 3) |
188,788 | from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.mmcv.ops.nms import multiclass_nms
from mmdeploy.utils import Backend, get_backend
def multiclass_nms(boxes: Tensor,
scores: Tensor,
max_output_boxes_per_class: int = 1000,
iou_threshold: float = 0.5,
score_threshold: float = 0.05,
pre_top_k: int = -1,
keep_top_k: int = -1,
output_index: bool = False,
nms_type='nms'):
"""Apis for multiclass nms."""
if nms_type == 'nms':
return _multiclass_nms(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k,
output_index=output_index)
elif nms_type == 'nms_rotated':
return multiclass_nms_rotated(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k)
elif nms_type == 'nms_match':
return multiclass_nms_match(
boxes,
scores,
max_output_boxes_per_class=max_output_boxes_per_class,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k)
else:
raise NotImplementedError(f'Unsupported nms type: {nms_type}.')
The provided code snippet includes necessary dependencies for implementing the `predict` function. Write a Python function `def predict(self, x: Tuple[Tensor], batch_data_samples: List = [], test_cfg: Optional[dict] = None)` to solve the following problem:
Get predictions and transform to bbox and keypoints results. Args: x (Tuple[Tensor]): The input tensor from upstream network. batch_data_samples: Batch image meta info. Defaults to None. test_cfg: The runtime config for testing process. Returns: Tuple[Tensor]: Predict bbox and keypoint results. - dets (Tensor): Predict bboxes and scores, which is a 3D Tensor, has shape (batch_size, num_instances, 5), the last dimension 5 arranged as (x1, y1, x2, y2, score). - pred_kpts (Tensor): Predict keypoints and scores, which is a 4D Tensor, has shape (batch_size, num_instances, num_keypoints, 3), the last dimension 3 arranged as (x, y, score).
Here is the function:
def predict(self,
x: Tuple[Tensor],
batch_data_samples: List = [],
test_cfg: Optional[dict] = None):
"""Get predictions and transform to bbox and keypoints results.
Args:
x (Tuple[Tensor]): The input tensor from upstream network.
batch_data_samples: Batch image meta info. Defaults to None.
test_cfg: The runtime config for testing process.
Returns:
Tuple[Tensor]: Predict bbox and keypoint results.
- dets (Tensor): Predict bboxes and scores, which is a 3D Tensor,
            has shape (batch_size, num_instances, 5), the last dimension 5
            arranged as (x1, y1, x2, y2, score).
        - pred_kpts (Tensor): Predict keypoints and scores, which is a 4D
            Tensor, has shape (batch_size, num_instances, num_keypoints, 3),
            the last dimension 3 arranged as (x, y, score).
"""
cls_scores, objectnesses, bbox_preds, kpt_offsets, \
kpt_vis = self.head_module(x)[:5]
ctx = FUNCTION_REWRITER.get_context()
deploy_cfg = ctx.cfg
dtype = cls_scores[0].dtype
device = cls_scores[0].device
assert len(cls_scores) == len(bbox_preds)
cfg = self.test_cfg if test_cfg is None else test_cfg
num_imgs = cls_scores[0].shape[0]
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
self.mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes, dtype=dtype, device=device)
flatten_priors = torch.cat(self.mlvl_priors)
mlvl_strides = [
flatten_priors.new_full((featmap_size.numel(), ), stride)
for featmap_size, stride in zip(featmap_sizes, self.featmap_strides)
]
flatten_stride = torch.cat(mlvl_strides)
# flatten cls_scores, bbox_preds and objectness
flatten_cls_scores = self._flatten_predictions(cls_scores).sigmoid()
flatten_bbox_preds = self._flatten_predictions(bbox_preds)
flatten_objectness = self._flatten_predictions(objectnesses).sigmoid()
flatten_kpt_offsets = self._flatten_predictions(kpt_offsets)
flatten_kpt_vis = self._flatten_predictions(kpt_vis).sigmoid()
bboxes = self.decode_bbox(flatten_bbox_preds, flatten_priors,
flatten_stride)
flatten_decoded_kpts = self.decode_kpt_reg(flatten_kpt_offsets,
flatten_priors, flatten_stride)
scores = flatten_cls_scores * flatten_objectness
pred_kpts = torch.cat([flatten_decoded_kpts,
flatten_kpt_vis.unsqueeze(3)],
dim=3)
backend = get_backend(deploy_cfg)
if backend == Backend.TENSORRT:
# pad for batched_nms because its output index is filled with -1
bboxes = torch.cat(
[bboxes,
bboxes.new_zeros((bboxes.shape[0], 1, bboxes.shape[2]))],
dim=1)
scores = torch.cat(
[scores, scores.new_zeros((scores.shape[0], 1, 1))], dim=1)
pred_kpts = torch.cat([
pred_kpts,
pred_kpts.new_zeros((pred_kpts.shape[0], 1, pred_kpts.shape[2],
pred_kpts.shape[3]))
],
dim=1)
# nms
post_params = get_post_processing_params(deploy_cfg)
max_output_boxes_per_class = post_params.max_output_boxes_per_class
iou_threshold = cfg.get('nms_thr', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.get('pre_top_k', -1)
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
# do nms
_, _, nms_indices = multiclass_nms(
bboxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
pre_top_k=pre_top_k,
keep_top_k=keep_top_k,
output_index=True)
batch_inds = torch.arange(num_imgs, device=scores.device).view(-1, 1)
dets = torch.cat([bboxes, scores], dim=2)
dets = dets[batch_inds, nms_indices, ...]
pred_kpts = pred_kpts[batch_inds, nms_indices, ...]
    return dets, pred_kpts | Get predictions and transform to bbox and keypoints results. Args: x (Tuple[Tensor]): The input tensor from upstream network. batch_data_samples: Batch image meta info. Defaults to None. test_cfg: The runtime config for testing process. Returns: Tuple[Tensor]: Predict bbox and keypoint results. - dets (Tensor): Predict bboxes and scores, which is a 3D Tensor, has shape (batch_size, num_instances, 5), the last dimension 5 arranged as (x1, y1, x2, y2, score). - pred_kpts (Tensor): Predict keypoints and scores, which is a 4D Tensor, has shape (batch_size, num_instances, num_keypoints, 3), the last dimension 3 arranged as (x, y, score). |
188,789 | from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `base_pose_estimator__forward` function. Write a Python function `def base_pose_estimator__forward(self, inputs, *args, **kwargs)` to solve the following problem:
Rewrite `forward` of TopDown for default backend. 1. Directly call `_forward` of the subclass. Args: ctx (ContextCaller): The context with additional information. self (BasePoseEstimator): The instance of the class BasePoseEstimator. inputs (torch.Tensor[NxCxHxW]): Input images. Returns: torch.Tensor: The predicted heatmaps.
Here is the function:
def base_pose_estimator__forward(self, inputs, *args, **kwargs):
"""Rewrite `forward` of TopDown for default backend.'.
1.directly call _forward of subclass.
Args:
ctx (ContextCaller): The context with additional information.
self (BasePoseEstimator): The instance of the class Object
BasePoseEstimator.
inputs (torch.Tensor[NxCxHxW]): Input images.
Returns:
torch.Tensor: The predicted heatmaps.
"""
    return self._forward(inputs) | Rewrite `forward` of TopDown for default backend. 1. Directly call `_forward` of the subclass. Args: ctx (ContextCaller): The context with additional information. self (BasePoseEstimator): The instance of the class BasePoseEstimator. inputs (torch.Tensor[NxCxHxW]): Input images. Returns: torch.Tensor: The predicted heatmaps. |
188,790 | import torch
import torch.nn.functional as F
from mmpose.models.utils import rope
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
    'mmpose.models.utils.rtmcc_block.ScaleNorm.forward', backend='ncnn')
def scalenorm__forward__ncnn(self, x):
"""Rewrite `scalenorm` for ncnn backend.
    Rewrite scalenorm to avoid FP16 overflow on the ncnn Android platform.
    """
    # The one-dim Frobenius norm is equal to the L2 norm, so torch.norm
    # maps to the ReduceL2 onnx op, which avoids FP16 overflow.
norm = torch.norm(x, dim=2, keepdim=True)
norm = norm * self.scale
# Rewrite for ncnn binaryop broadcast.
norm = norm.clamp(min=self.eps)
return (x.unsqueeze(2) / norm.unsqueeze(2)).squeeze(2) * self.g
@FUNCTION_REWRITER.register_rewriter(
    'mmpose.models.utils.rtmcc_block.RTMCCBlock._forward', backend='ncnn')
def rtmccblock___forward_ncnn(self, inputs):
"""Rewrite `_forward` of RTMBlock for ncnn backend.
Rewrite the matmul and avoid unbind for ncnn backend.
"""
if self.attn_type == 'self-attn':
x = inputs
else:
x, k, v = inputs
x = self.ln(x)
uv = self.uv(x)
if self.attn_type == 'self-attn':
uv = self.act_fn(uv)
u = uv[..., :self.e]
v = uv[..., self.e:2 * self.e]
base = uv[..., 2 * self.e:2 * self.e + self.s]
q = (base.unsqueeze(1) * self.gamma[None, None, 0:1, :] +
self.beta[None, None, 0:1, :]).squeeze(1)
k = (base.unsqueeze(1) * self.gamma[None, None, 1:2, :] +
self.beta[None, None, 1:2, :]).squeeze(1)
if self.pos_enc:
q = rope(q, dim=1)
k = rope(k, dim=1)
else:
u, q = torch.split(self.act_fn(uv), [self.e, self.s], dim=-1)
k = self.k_fc(k)
v = self.v_fc(v)
if self.pos_enc:
q = rope(q, 1)
k = rope(k, 1)
qk = torch.bmm(q, k.permute(0, 2, 1))
if self.use_rel_bias:
if self.attn_type == 'self-attn':
bias = self.rel_pos_bias(q.size(1))
else:
bias = self.rel_pos_bias(q.size(1), k.size(1))
qk += bias[:, :q.size(1), :k.size(1)]
kernel = torch.square(F.relu(qk / self.sqrt_s))
if self.dropout_rate > 0.:
kernel = self.dropout(kernel)
x = u * torch.bmm(kernel, v)
x = self.o(x)
return x
@FUNCTION_REWRITER.register_rewriter(
    'mmpose.models.utils.rtmcc_block.Scale.forward', backend='ncnn')
def scale__forward_ncnn(self, x):
"""Rewrite `forward` of Scale for ncnn backend.
Adapt the shape to avoid ncnn BinaryOp seg fault.
"""
x = x.unsqueeze(1)
scale = self.scale[None, None, None, :]
return (x * scale).squeeze(1)
The provided code snippet includes necessary dependencies for implementing the `scalenorm__forward__ncnn` function. Write a Python function `def scalenorm__forward__ncnn(self, x)` to solve the following problem:
Rewrite `scalenorm` for ncnn backend. Rewrite scalenorm to avoid FP16 overflow on the ncnn Android platform.
Here is the function:
def scalenorm__forward__ncnn(self, x):
"""Rewrite `scalenorm` for ncnn backend.
    Rewrite scalenorm to avoid FP16 overflow on the ncnn Android platform.
    """
    # The one-dim Frobenius norm is equal to the L2 norm, so torch.norm
    # maps to the ReduceL2 onnx op, which avoids FP16 overflow.
norm = torch.norm(x, dim=2, keepdim=True)
norm = norm * self.scale
# Rewrite for ncnn binaryop broadcast.
norm = norm.clamp(min=self.eps)
    return (x.unsqueeze(2) / norm.unsqueeze(2)).squeeze(2) * self.g | Rewrite `scalenorm` for ncnn backend. Rewrite scalenorm to avoid FP16 overflow on the ncnn Android platform. |
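A standalone numerical check (ignoring the `eps` clamp) that the unsqueeze/squeeze form above matches the plain broadcasted division; shapes and values are illustrative only:
import torch

x = torch.randn(1, 17, 256)
scale, g = 256 ** -0.5, torch.ones(256)
norm = torch.norm(x, dim=2, keepdim=True) * scale
out = (x.unsqueeze(2) / norm.unsqueeze(2)).squeeze(2) * g
assert torch.allclose(out, x / norm * g)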
188,791 | import torch
import torch.nn.functional as F
from mmpose.models.utils import rope
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
    'mmpose.models.utils.rtmcc_block.ScaleNorm.forward', backend='ncnn')
def scalenorm__forward__ncnn(self, x):
"""Rewrite `scalenorm` for ncnn backend.
    Rewrite scalenorm to avoid FP16 overflow on the ncnn Android platform.
    """
    # The one-dim Frobenius norm is equal to the L2 norm, so torch.norm
    # maps to the ReduceL2 onnx op, which avoids FP16 overflow.
norm = torch.norm(x, dim=2, keepdim=True)
norm = norm * self.scale
# Rewrite for ncnn binaryop broadcast.
norm = norm.clamp(min=self.eps)
return (x.unsqueeze(2) / norm.unsqueeze(2)).squeeze(2) * self.g
@FUNCTION_REWRITER.register_rewriter(
    'mmpose.models.utils.rtmcc_block.RTMCCBlock._forward', backend='ncnn')
def rtmccblock___forward_ncnn(self, inputs):
"""Rewrite `_forward` of RTMBlock for ncnn backend.
Rewrite the matmul and avoid unbind for ncnn backend.
"""
if self.attn_type == 'self-attn':
x = inputs
else:
x, k, v = inputs
x = self.ln(x)
uv = self.uv(x)
if self.attn_type == 'self-attn':
uv = self.act_fn(uv)
u = uv[..., :self.e]
v = uv[..., self.e:2 * self.e]
base = uv[..., 2 * self.e:2 * self.e + self.s]
q = (base.unsqueeze(1) * self.gamma[None, None, 0:1, :] +
self.beta[None, None, 0:1, :]).squeeze(1)
k = (base.unsqueeze(1) * self.gamma[None, None, 1:2, :] +
self.beta[None, None, 1:2, :]).squeeze(1)
if self.pos_enc:
q = rope(q, dim=1)
k = rope(k, dim=1)
else:
u, q = torch.split(self.act_fn(uv), [self.e, self.s], dim=-1)
k = self.k_fc(k)
v = self.v_fc(v)
if self.pos_enc:
q = rope(q, 1)
k = rope(k, 1)
qk = torch.bmm(q, k.permute(0, 2, 1))
if self.use_rel_bias:
if self.attn_type == 'self-attn':
bias = self.rel_pos_bias(q.size(1))
else:
bias = self.rel_pos_bias(q.size(1), k.size(1))
qk += bias[:, :q.size(1), :k.size(1)]
kernel = torch.square(F.relu(qk / self.sqrt_s))
if self.dropout_rate > 0.:
kernel = self.dropout(kernel)
x = u * torch.bmm(kernel, v)
x = self.o(x)
return x
@FUNCTION_REWRITER.register_rewriter(
    'mmpose.models.utils.rtmcc_block.Scale.forward', backend='ncnn')
def scale__forward_ncnn(self, x):
"""Rewrite `forward` of Scale for ncnn backend.
Adapt the shape to avoid ncnn BinaryOp seg fault.
"""
x = x.unsqueeze(1)
scale = self.scale[None, None, None, :]
return (x * scale).squeeze(1)
The provided code snippet includes necessary dependencies for implementing the `rtmccblock___forward_ncnn` function. Write a Python function `def rtmccblock___forward_ncnn(self, inputs)` to solve the following problem:
Rewrite `_forward` of RTMCCBlock for ncnn backend. Rewrite the matmul and avoid `unbind` for ncnn backend.
Here is the function:
def rtmccblock___forward_ncnn(self, inputs):
"""Rewrite `_forward` of RTMBlock for ncnn backend.
Rewrite the matmul and avoid unbind for ncnn backend.
"""
if self.attn_type == 'self-attn':
x = inputs
else:
x, k, v = inputs
x = self.ln(x)
uv = self.uv(x)
if self.attn_type == 'self-attn':
uv = self.act_fn(uv)
u = uv[..., :self.e]
v = uv[..., self.e:2 * self.e]
base = uv[..., 2 * self.e:2 * self.e + self.s]
q = (base.unsqueeze(1) * self.gamma[None, None, 0:1, :] +
self.beta[None, None, 0:1, :]).squeeze(1)
k = (base.unsqueeze(1) * self.gamma[None, None, 1:2, :] +
self.beta[None, None, 1:2, :]).squeeze(1)
if self.pos_enc:
q = rope(q, dim=1)
k = rope(k, dim=1)
else:
u, q = torch.split(self.act_fn(uv), [self.e, self.s], dim=-1)
k = self.k_fc(k)
v = self.v_fc(v)
if self.pos_enc:
q = rope(q, 1)
k = rope(k, 1)
qk = torch.bmm(q, k.permute(0, 2, 1))
if self.use_rel_bias:
if self.attn_type == 'self-attn':
bias = self.rel_pos_bias(q.size(1))
else:
bias = self.rel_pos_bias(q.size(1), k.size(1))
qk += bias[:, :q.size(1), :k.size(1)]
kernel = torch.square(F.relu(qk / self.sqrt_s))
if self.dropout_rate > 0.:
kernel = self.dropout(kernel)
x = u * torch.bmm(kernel, v)
x = self.o(x)
    return x | Rewrite `_forward` of RTMCCBlock for ncnn backend. Rewrite the matmul and avoid `unbind` for ncnn backend. |
188,792 |
The provided code snippet includes necessary dependencies for implementing the `scale__forward_ncnn` function. Write a Python function `def scale__forward_ncnn(self, x)` to solve the following problem:
Rewrite `forward` of Scale for ncnn backend. Adapt the shape to avoid ncnn BinaryOp seg fault.
Here is the function:
def scale__forward_ncnn(self, x):
"""Rewrite `forward` of Scale for ncnn backend.
Adapt the shape to avoid ncnn BinaryOp seg fault.
"""
x = x.unsqueeze(1)
scale = self.scale[None, None, None, :]
return (x * scale).squeeze(1) | Rewrite `forward` of Scale for ncnn backend. Adapt the shape to avoid ncnn BinaryOp seg fault. |
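The rank juggling above is numerically a no-op; it only turns an awkward 3-D by 1-D broadcast into the 4-D by 4-D form that ncnn's BinaryOp handles. A standalone check with illustrative shapes:
import torch

x, scale = torch.randn(1, 17, 256), torch.randn(256)
out = (x.unsqueeze(1) * scale[None, None, None, :]).squeeze(1)
assert torch.allclose(out, x * scale)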
188,793 | from abc import ABCMeta
from mmengine import Config
from mmengine.registry import Registry
from mmdeploy.utils import Codebase, Task, get_task_type
from .task import BaseTask
class MMCodebase(metaclass=ABCMeta):
"""Wrap the apis of OpenMMLab Codebase."""
task_registry: Registry = None
def __init__(self) -> None:
pass
    @classmethod
    def get_task_class(cls, task: Task) -> BaseTask:
"""Get the task processors class according to the task type.
Args:
task (Task): The task enumeration.
Returns:
type: The task processor class.
"""
return cls.task_registry.module_dict[task.value]
    @classmethod
    def build_task_processor(cls, model_cfg: Config, deploy_cfg: Config,
device: str):
"""The interface to build the task processors of the codebase.
Args:
model_cfg (str | Config): Model config file.
deploy_cfg (str | Config): Deployment config file.
device (str): A string specifying device type.
Returns:
BaseTask: A task processor.
"""
task = get_task_type(deploy_cfg)
return cls.task_registry.build(
dict(
type=task.value,
model_cfg=model_cfg,
deploy_cfg=deploy_cfg,
device=device))
    @classmethod
    def register_deploy_modules(cls):
"""register deploy module."""
raise NotImplementedError('register_deploy_modules not implemented.')
    @classmethod
    def register_all_modules(cls):
"""register codebase module."""
raise NotImplementedError('register_all_modules not implemented.')
CODEBASE = Registry('Codebases')
The provided code snippet includes necessary dependencies for implementing the `get_codebase_class` function. Write a Python function `def get_codebase_class(codebase: Codebase) -> MMCodebase` to solve the following problem:
Get the codebase class from the registry. Args: codebase (Codebase): The codebase enum type. Returns: type: The codebase class
Here is the function:
def get_codebase_class(codebase: Codebase) -> MMCodebase:
"""Get the codebase class from the registry.
Args:
codebase (Codebase): The codebase enum type.
Returns:
type: The codebase class
"""
import importlib
try:
importlib.import_module(f'mmdeploy.codebase.{codebase.value}.deploy')
except ImportError as e:
from mmdeploy.utils import get_root_logger
logger = get_root_logger()
        logger.warning(
            f'Import mmdeploy.codebase.{codebase.value}.deploy failed. '
            'Please check whether the module is a custom module. '
            f'{e}')
return CODEBASE.build({'type': codebase.value}) | Get the codebase class from the registry. Args: codebase (Codebase): The codebase enum type. Returns: type: The codebase class |
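A hedged usage sketch; the enum members below are assumed to exist in the installed mmdeploy version:
from mmdeploy.utils import Codebase, Task

codebase = get_codebase_class(Codebase.MMPOSE)  # imports the deploy module first
task_cls = codebase.get_task_class(Task.POSE_DETECTION)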
188,794 | from typing import Any, List, Optional, Sequence, Union
import mmengine
import torch
from mmaction.utils import LabelList
from mmengine import Config
from mmengine.model import BaseDataPreprocessor
from mmengine.registry import Registry
from mmengine.structures import BaseDataElement, LabelData
from torch import nn
from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import (Backend, get_backend, get_codebase_config,
get_root_logger, load_config)
__BACKEND_MODEL = Registry('backend_video_recognizer')
The provided code snippet includes necessary dependencies for implementing the `build_video_recognition_model` function. Write a Python function `def build_video_recognition_model( model_files: Sequence[str], model_cfg: Union[str, mmengine.Config], deploy_cfg: Union[str, mmengine.Config], device: str, data_preprocessor: Optional[Union[Config, BaseDataPreprocessor]] = None, **kwargs)` to solve the following problem:
Build video recognition model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | mmengine.Config): Input model config file or Config object. deploy_cfg (str | mmengine.Config): Input deployment config file or Config object. device (str): Device to input model. data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: BaseBackendModel: Video recognizer for a configured backend.
Here is the function:
def build_video_recognition_model(
model_files: Sequence[str],
model_cfg: Union[str, mmengine.Config],
deploy_cfg: Union[str, mmengine.Config],
device: str,
data_preprocessor: Optional[Union[Config,
BaseDataPreprocessor]] = None,
**kwargs):
"""Build video recognition model for different backends.
Args:
model_files (Sequence[str]): Input model file(s).
model_cfg (str | mmengine.Config): Input model config file or Config
object.
deploy_cfg (str | mmengine.Config): Input deployment config file or
Config object.
device (str): Device to input model.
data_preprocessor (BaseDataPreprocessor | Config): The data
preprocessor of the model.
Returns:
BaseBackendModel: Video recognizer for a configured backend.
"""
# load cfg if necessary
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
backend = get_backend(deploy_cfg)
model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')
backend_video_recognizer = __BACKEND_MODEL.build(
dict(
type=model_type,
backend=backend,
backend_files=model_files,
device=device,
deploy_cfg=deploy_cfg,
data_preprocessor=data_preprocessor,
**kwargs))
return backend_video_recognizer | Build video recognition model for different backends. Args: model_files (Sequence[str]): Input model file(s). model_cfg (str | mmengine.Config): Input model config file or Config object. deploy_cfg (str | mmengine.Config): Input deployment config file or Config object. device (str): Device to input model. data_preprocessor (BaseDataPreprocessor | Config): The data preprocessor of the model. Returns: BaseBackendModel: Video recognizer for a configured backend. |
188,795 | import os.path as osp
from operator import itemgetter
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import mmengine
import numpy as np
import torch
from mmengine.dataset import pseudo_collate
from mmengine.model import BaseDataPreprocessor
from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task, get_root_logger
from mmdeploy.utils.config_utils import get_input_shape
from .mmaction import MMACTION_TASK
The provided code snippet includes necessary dependencies for implementing the `process_model_config` function. Write a Python function `def process_model_config(model_cfg: mmengine.Config, imgs: Union[Sequence[str], Sequence[np.ndarray]], input_shape: Optional[Sequence[int]] = None)` to solve the following problem:
Process the model config. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s); accepted data types are List[str] and List[np.ndarray]. input_shape (list[int]): A list of two integers in (width, height) format specifying input shape. Default: None. Returns: mmengine.Config: the model config after processing.
Here is the function:
def process_model_config(model_cfg: mmengine.Config,
imgs: Union[Sequence[str], Sequence[np.ndarray]],
input_shape: Optional[Sequence[int]] = None):
"""Process the model config.
Args:
model_cfg (mmengine.Config): The model config.
        imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s); accepted
            data types are List[str] and List[np.ndarray].
        input_shape (list[int]): A list of two integers in (width, height)
format specifying input shape. Default: None.
Returns:
mmengine.Config: the model config after processing.
"""
logger = get_root_logger()
cfg = model_cfg.deepcopy()
test_pipeline_cfg = cfg.test_pipeline
if 'Init' not in test_pipeline_cfg[0]['type']:
test_pipeline_cfg = [dict(type='OpenCVInit')] + test_pipeline_cfg
else:
test_pipeline_cfg[0] = dict(type='OpenCVInit')
for i, trans in enumerate(test_pipeline_cfg):
if 'Decode' in trans['type']:
test_pipeline_cfg[i] = dict(type='OpenCVDecode')
cfg.test_pipeline = test_pipeline_cfg
# check whether input_shape is valid
if input_shape is not None:
has_crop = False
crop_size = -1
has_resize = False
scale = (-1, -1)
keep_ratio = True
for trans in cfg.test_pipeline:
if trans['type'] == 'Resize':
has_resize = True
keep_ratio = trans.get('keep_ratio', True)
scale = trans.scale
if trans['type'] in ['TenCrop', 'CenterCrop', 'ThreeCrop']:
has_crop = True
crop_size = trans.crop_size
if has_crop and tuple(input_shape) != (crop_size, crop_size):
logger.error(
f'`input shape` should be equal to `crop_size`: {crop_size},'
f' but given: {input_shape}')
if has_resize and (not has_crop):
if keep_ratio:
logger.error('Resize should set `keep_ratio` to False'
' when `input shape` is given.')
if tuple(input_shape) != scale:
logger.error(
f'`input shape` should be equal to `scale`: {scale},'
f' but given: {input_shape}')
    return cfg | Process the model config. Args: model_cfg (mmengine.Config): The model config. imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s); accepted data types are List[str] and List[np.ndarray]. input_shape (list[int]): A list of two integers in (width, height) format specifying input shape. Default: None. Returns: mmengine.Config: the model config after processing. |
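A standalone sketch of the pipeline remapping, assuming mmengine is installed; the decord-based pipeline below is a hypothetical minimal config:
import mmengine

cfg = mmengine.Config(
    dict(test_pipeline=[
        dict(type='DecordInit'),
        dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25),
        dict(type='DecordDecode'),
    ]))
cfg = process_model_config(cfg, imgs=['demo.mp4'])
assert cfg.test_pipeline[0]['type'] == 'OpenCVInit'
assert cfg.test_pipeline[-1]['type'] == 'OpenCVDecode'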
188,796 | from mmaction.utils import OptSampleList
from torch import Tensor
from mmdeploy.core import FUNCTION_REWRITER
The provided code snippet includes necessary dependencies for implementing the `base_recognizer__forward` function. Write a Python function `def base_recognizer__forward(self, inputs: Tensor, data_samples: OptSampleList = None, mode: str = 'tensor', **kwargs)` to solve the following problem:
Rewrite `forward` of Recognizer2D for default backend. Args: inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[``ActionDataSample``], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: torch.Tensor: the averaged classification scores.
Here is the function:
def base_recognizer__forward(self,
inputs: Tensor,
data_samples: OptSampleList = None,
mode: str = 'tensor',
**kwargs):
"""Rewrite `forward` of Recognizer2D for default backend.
Args:
inputs (torch.Tensor): The input tensor with shape
(N, C, ...) in general.
data_samples (List[``ActionDataSample``], optional): The
            annotation data of every sample. Defaults to None.
mode (str): Return what kind of value. Defaults to ``tensor``.
Returns:
        torch.Tensor: the averaged classification scores.
"""
assert mode == 'predict'
feats, predict_kwargs = self.extract_feat(inputs, test_mode=True)
cls_scores = self.cls_head(feats, **predict_kwargs)
num_segs = cls_scores.shape[0] // len(data_samples)
cls_scores = self.cls_head.average_clip(cls_scores, num_segs=num_segs)
    return cls_scores | Rewrite `forward` of Recognizer2D for default backend. Args: inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[``ActionDataSample``], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: torch.Tensor: the averaged classification scores.
188,797 | import os
import os.path as osp
import tempfile
from subprocess import call
from typing import List, Optional, Union
import onnx
from .init_plugins import get_onnx2dlc_path
The provided code snippet includes necessary dependencies for implementing the `get_env_key` function. Write a Python function `def get_env_key() -> str` to solve the following problem:
Return environment key str. Returns: str: The string to find SNPE service URI
Here is the function:
def get_env_key() -> str:
"""Return environment key str.
Returns:
str: The string to find SNPE service URI
"""
return '__MMDEPLOY_SNPE_URI' | Return environment key str. Returns: str: The string to find SNPE service URI |
188,798 | import os
import os.path as osp
import tempfile
from subprocess import call
from typing import List, Optional, Union
import onnx
from .init_plugins import get_onnx2dlc_path
def mkdir_or_exist(dir_name, mode=0o777):
if dir_name == '':
return
dir_name = osp.expanduser(dir_name)
os.makedirs(dir_name, mode=mode, exist_ok=True)
The provided code snippet includes necessary dependencies for implementing the `get_output_model_file` function. Write a Python function `def get_output_model_file(onnx_path: str, work_dir: Optional[str] = None) -> List[str]` to solve the following problem:
Returns the path to the .dlc file with export result. Args: onnx_path (str): The path to the onnx model. work_dir (str|None): The path to the directory for saving the results. Defaults to `None`, which means use the directory of onnx_path. Returns: List[str]: The path to the files where the export result will be located.
Here is the function:
def get_output_model_file(onnx_path: str,
work_dir: Optional[str] = None) -> List[str]:
"""Returns the path to the .dlc file with export result.
Args:
onnx_path (str): The path to the onnx model.
work_dir (str|None): The path to the directory for saving the results.
Defaults to `None`, which means use the directory of onnx_path.
Returns:
List[str]: The path to the files where the export result will be
located.
"""
if work_dir is None:
work_dir = osp.dirname(onnx_path)
mkdir_or_exist(osp.abspath(work_dir))
file_name = osp.splitext(osp.split(onnx_path)[1])[0]
save_dlc = osp.join(work_dir, file_name + '.dlc')
return save_dlc | Returns the path to the .dlc file with export result. Args: onnx_path (str): The path to the onnx model. work_dir (str|None): The path to the directory for saving the results. Defaults to `None`, which means use the directory of onnx_path. Returns: List[str]: The path to the files where the export result will be located. |
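A quick usage sketch (the path is illustrative):
dlc_path = get_output_model_file('work_dir/end2end.onnx')
# -> 'work_dir/end2end.dlc'; the work directory is created if it does not exist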
188,799 | import os
import os.path as osp
import shutil  # needed by the get_onnx2dlc_path helper shown below
import tempfile
from subprocess import call
from typing import List, Optional, Union
import onnx
from .init_plugins import get_onnx2dlc_path
def get_onnx2dlc_path() -> str:
"""Get snpe-onnx-to-dlc path.
Returns:
str: A path of snpe-onnx-to-dlc tool.
"""
return shutil.which('snpe-onnx-to-dlc')
The provided code snippet includes necessary dependencies for implementing the `from_onnx` function. Write a Python function `def from_onnx(onnx_model: Union[onnx.ModelProto, str], output_file_prefix: str)` to solve the following problem:
Convert ONNX to dlc. We need to use an executable program to convert the `.onnx` file to a `.dlc` Example: >>> from mmdeploy.apis.snpe import from_onnx >>> onnx_path = 'work_dir/end2end.onnx' >>> output_file_prefix = 'work_dir/end2end' >>> from_onnx(onnx_path, output_file_prefix) Args: onnx_model (onnx.ModelProto|str): The onnx model or the path to it. output_file_prefix (str): The path to save the output .dlc file.
Here is the function:
def from_onnx(onnx_model: Union[onnx.ModelProto, str],
output_file_prefix: str):
"""Convert ONNX to dlc.
    We need to use an executable program to convert the `.onnx` file to a `.dlc`
Example:
>>> from mmdeploy.apis.snpe import from_onnx
>>> onnx_path = 'work_dir/end2end.onnx'
>>> output_file_prefix = 'work_dir/end2end'
>>> from_onnx(onnx_path, output_file_prefix)
Args:
        onnx_model (onnx.ModelProto|str): The onnx model or the path to it.
output_file_prefix (str): The path to save the output .dlc file.
"""
if not isinstance(onnx_model, str):
onnx_path = tempfile.NamedTemporaryFile(suffix='.onnx').name
onnx.save(onnx_model, onnx_path)
else:
onnx_path = onnx_model
save_dlc = output_file_prefix + '.dlc'
onnx2dlc = get_onnx2dlc_path()
ret_code = call(
[onnx2dlc, '--input_network', onnx_path, '--output', save_dlc])
    assert ret_code == 0, 'onnx2dlc failed' | Convert ONNX to dlc. We need to use an executable program to convert the `.onnx` file to a `.dlc` Example: >>> from mmdeploy.apis.snpe import from_onnx >>> onnx_path = 'work_dir/end2end.onnx' >>> output_file_prefix = 'work_dir/end2end' >>> from_onnx(onnx_path, output_file_prefix) Args: onnx_model (onnx.ModelProto|str): The onnx model or the path to it. output_file_prefix (str): The path to save the output .dlc file.
188,800 | import os
from abc import abstractmethod
from typing import Any, Dict, Optional, Union
import tvm
from mmengine import Registry
from tvm import IRModule, auto_scheduler, autotvm, relay
from tvm.target import Target
from mmdeploy.utils import get_root_logger
AUTOTVM_TUNER = Registry('autotvm_tuner')
AUTOTVM_TUNER.register_module()(autotvm.tuner.XGBTuner)
AUTOTVM_TUNER.register_module()(autotvm.tuner.GATuner)
AUTOTVM_TUNER.register_module()(autotvm.tuner.GridSearchTuner)
AUTOTVM_TUNER.register_module()(autotvm.tuner.RandomTuner)
The provided code snippet includes necessary dependencies for implementing the `build_autotvm_tuner` function. Write a Python function `def build_autotvm_tuner(cfg: Dict)` to solve the following problem:
Build the autotvm tuner. Args: cfg (Dict): The build config Returns: Any: The autotvm tuner instance
Here is the function:
def build_autotvm_tuner(cfg: Dict):
"""Build the autotvm tuner.
Args:
cfg (Dict): The build config
Returns:
Any: The autotvm tuner instance
"""
return AUTOTVM_TUNER.build(cfg) | Build the autotvm tuner. Args: cfg (Dict): The build config Returns: Any: The autotvm tuner instance |
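A sketch of a config the registry accepts; `task` below stands in for an autotvm.task.Task extracted from the model and is assumed, not defined here:
tuner_cfg = dict(type='XGBTuner', task=task, loss_type='rank')  # `task` assumed
tuner = build_autotvm_tuner(tuner_cfg)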
188,801 | import os
from abc import abstractmethod
from typing import Any, Dict, Optional, Union
import tvm
from mmengine import Registry
from tvm import IRModule, auto_scheduler, autotvm, relay
from tvm.target import Target
from mmdeploy.utils import get_root_logger
AUTOTVM_BUILDER = Registry('autotvm_builder')
AUTOTVM_BUILDER.register_module()(autotvm.LocalBuilder)
The provided code snippet includes necessary dependencies for implementing the `build_autotvm_builder` function. Write a Python function `def build_autotvm_builder(cfg: Dict)` to solve the following problem:
Build the autotvm builder. Args: cfg (Dict): The build config Returns: Any: The autotvm builder instance
Here is the function:
def build_autotvm_builder(cfg: Dict):
"""Build the autotvm builder.
Args:
cfg (Dict): The build config
Returns:
Any: The autotvm builder instance
"""
return AUTOTVM_BUILDER.build(cfg) | Build the autotvm builder. Args: cfg (Dict): The build config Returns: Any: The autotvm builder instance |
188,802 | import os
from abc import abstractmethod
from typing import Any, Dict, Optional, Union
import tvm
from mmengine import Registry
from tvm import IRModule, auto_scheduler, autotvm, relay
from tvm.target import Target
from mmdeploy.utils import get_root_logger
AUTOTVM_RUNNER = Registry('autotvm_runner')
AUTOTVM_RUNNER.register_module()(autotvm.LocalRunner)
AUTOTVM_RUNNER.register_module()(autotvm.RPCRunner)
The provided code snippet includes necessary dependencies for implementing the `build_autotvm_runner` function. Write a Python function `def build_autotvm_runner(cfg: Dict)` to solve the following problem:
Build the autotvm runner. Args: cfg (Dict): The build config Returns: Any: The autotvm runner instance
Here is the function:
def build_autotvm_runner(cfg: Dict):
"""Build the autotvm runner.
Args:
cfg (Dict): The build config
Returns:
Any: The autotvm runner instance
"""
return AUTOTVM_RUNNER.build(cfg) | Build the autotvm runner. Args: cfg (Dict): The build config Returns: Any: The autotvm runner instance |
188,803 | import os
from abc import abstractmethod
from typing import Any, Dict, Optional, Union
import tvm
from mmengine import Registry
from tvm import IRModule, auto_scheduler, autotvm, relay
from tvm.target import Target
from mmdeploy.utils import get_root_logger
AUTO_SCHEDULER_BUILDER = Registry('auto_scheduler_builder')
AUTO_SCHEDULER_BUILDER.register_module()(auto_scheduler.LocalBuilder)
The provided code snippet includes necessary dependencies for implementing the `build_auto_scheduler_builder` function. Write a Python function `def build_auto_scheduler_builder(cfg: Dict)` to solve the following problem:
Build the ansor builder. Args: cfg (Dict): The build config Returns: Any: The ansor builder instance
Here is the function:
def build_auto_scheduler_builder(cfg: Dict):
"""Build the ansor builder.
Args:
cfg (Dict): The build config
Returns:
Any: The ansor builder instance
"""
return AUTO_SCHEDULER_BUILDER.build(cfg) | Build the ansor builder. Args: cfg (Dict): The build config Returns: Any: The ansor builder instance |
188,804 | import os
from abc import abstractmethod
from typing import Any, Dict, Optional, Union
import tvm
from mmengine import Registry
from tvm import IRModule, auto_scheduler, autotvm, relay
from tvm.target import Target
from mmdeploy.utils import get_root_logger
AUTO_SCHEDULER_RUNNER = Registry('auto_scheduler_runner')
AUTO_SCHEDULER_RUNNER.register_module()(auto_scheduler.LocalRunner)
AUTO_SCHEDULER_RUNNER.register_module()(auto_scheduler.RPCRunner)
The provided code snippet includes necessary dependencies for implementing the `build_auto_scheduler_runner` function. Write a Python function `def build_auto_scheduler_runner(cfg: Dict)` to solve the following problem:
Build the ansor tuner. Args: cfg (Dict): The build config Returns: Any: The ansor tuner instance
Here is the function:
def build_auto_scheduler_runner(cfg: Dict):
"""Build the ansor tuner.
Args:
cfg (Dict): The build config
Returns:
Any: The ansor tuner instance
"""
return AUTO_SCHEDULER_RUNNER.build(cfg) | Build the ansor tuner. Args: cfg (Dict): The build config Returns: Any: The ansor tuner instance |
188,805 | from typing import Callable, Dict, Optional, Union
import onnx
from tvm.relay.frontend import from_onnx as relay_from_onnx
from tvm.relay.quantize import QConfig
from tvm.relay.quantize import qconfig as create_qconfig
from tvm.relay.quantize import quantize
from tvm.target import Target
from mmdeploy.utils import get_root_logger
from .tuner import TVMTunerBase, build_tvm_tuner
def build_tvm_tuner(cfg: Dict):
"""Build the tvm tuner.
Args:
cfg (Dict): The build config
Returns:
Any: The tvm tuner instance
"""
return TVM_TUNER.build(cfg)
class TVMTunerBase:
"""The base class of TVM tuner.
Args:
target (Union[str, Target]): The target platform to be tuned.
opt_level (int): The optimization level.
use_vm (bool): Enable tvm virtual machine runtime.
"""
def __init__(self,
target: Union[str, Target],
opt_level: int = 3,
use_vm: bool = False) -> None:
if isinstance(target, str):
target = Target(target)
self._target = target
self._opt_level = opt_level
self._use_vm = use_vm
    @property
    def use_vm(self) -> bool:
"""Get use_vm.
Returns:
bool: use_vm
"""
return self._use_vm
def tune(self, mod: IRModule, params: Dict):
"""Tune the graph.
Args:
mod (IRModule): The graph module.
params (Dict): The graph parameters.
"""
raise NotImplementedError('tune method not implemented.')
def build(self, mod: IRModule, params: Dict):
"""Build tuning library.
Args:
mod (IRModule): IRModule to build
params (Dict): Parameter of the mod
Returns:
lib: The runtime factory for the graph executor
"""
with tvm.transform.PassContext(opt_level=self._opt_level):
if self._use_vm:
ret = relay.vm.compile(mod, target=self._target, params=params)
else:
ret = relay.build_module.build(
mod, target=self._target, params=params)
return ret
The provided code snippet includes necessary dependencies for implementing the `from_onnx` function. Write a Python function `def from_onnx(onnx_model: Union[str, onnx.ModelProto], output_file: str, use_vm: bool = False, bytecode_file: str = '', shape: Optional[Dict] = None, dtype: Union[str, Dict] = 'float32', tuner: Optional[Union[TVMTunerBase, Dict]] = None, qconfig: Optional[Union[QConfig, Dict]] = None, dataset: Optional[Callable] = None)` to solve the following problem:
Convert ONNX model to tvm lib. Args: onnx_model (Union[str, onnx.ModelProto]): ONNX model or model path output_file (str): output library path use_vm (bool, optional): Enable tvm virtual machine runtime. Defaults to False. bytecode_file (str, optional): output bytecode path for virtual machine. Defaults to ''. shape (Optional[Dict], optional): The input shape directory. Defaults to None. dtype (Union[str, Dict], optional): The input data type dictionary. Defaults to 'float32'. tuner (Optional[Union[TVMTunerBase, Dict]], optional): The tuner config. Defaults to None. Return: lib: The converted tvm lib bytecode: The bytecode of virtual machine runtime. None if use_vm==False. Examples: >>> from mmdeploy.backend.tvm import from_onnx >>> onnx_path = 'model.onnx' >>> output_file = 'model.so' >>> shape = {'input':[1,3,224,224]} >>> dtype = {'input':'float32'} >>> from_onnx(onnx_path, output_file, shape=shape, dtype=dtype)
Here is the function:
def from_onnx(onnx_model: Union[str, onnx.ModelProto],
output_file: str,
use_vm: bool = False,
bytecode_file: str = '',
shape: Optional[Dict] = None,
dtype: Union[str, Dict] = 'float32',
tuner: Optional[Union[TVMTunerBase, Dict]] = None,
qconfig: Optional[Union[QConfig, Dict]] = None,
dataset: Optional[Callable] = None):
"""Convert ONNX model to tvm lib.
Args:
onnx_model (Union[str, onnx.ModelProto]): ONNX model or model path
output_file (str): output library path
use_vm (bool, optional): Enable tvm virtual machine runtime.
Defaults to False.
bytecode_file (str, optional): output bytecode path for virtual
machine. Defaults to ''.
shape (Optional[Dict], optional): The input shape directory. Defaults
to None.
dtype (Union[str, Dict], optional): The input data type dictionary.
Defaults to 'float32'.
tuner (Optional[Union[TVMTunerBase, Dict]], optional): The tuner
config. Defaults to None.
Return:
lib: The converted tvm lib
bytecode: The bytecode of virtual machine runtime.
None if use_vm==False.
Examples:
>>> from mmdeploy.backend.tvm import from_onnx
>>> onnx_path = 'model.onnx'
>>> output_file = 'model.so'
>>> shape = {'input':[1,3,224,224]}
>>> dtype = {'input':'float32'}
>>> from_onnx(onnx_path, output_file, shape=shape, dtype=dtype)
"""
logger = get_root_logger()
if shape is not None and isinstance(dtype, Dict):
assert len(shape) == len(dtype)
for name in shape:
assert name in dtype
if isinstance(onnx_model, str):
onnx_model = onnx.load(onnx_model)
assert isinstance(onnx_model, onnx.ModelProto
    ), f'Expect onnx.ModelProto, but got {type(onnx_model)}.'
logger.info('Convert onnx to IRModule.')
mod, params = relay_from_onnx(onnx_model, shape, dtype=dtype, opset=11)
# quantization
if qconfig is not None:
logger.info('Quantization')
if isinstance(qconfig, Dict):
qconfig = create_qconfig(**qconfig)
with qconfig:
mod = quantize(mod, params, dataset)
if tuner is None:
# use default tuner
tuner = dict(type='DefaultTuner', target=Target('llvm'))
if not issubclass(type(tuner), TVMTunerBase):
tuner['use_vm'] = use_vm
tuner = build_tvm_tuner(tuner)
logger.info(f'Tuning with {type(tuner).__name__} .')
tuner.tune(mod, params)
lib = tuner.build(mod, params)
logger.info(f'Export library to {output_file} .')
bytecode = None
if tuner.use_vm:
bytecode, lib = lib.save()
with open(bytecode_file, mode='wb') as f:
f.write(bytecode)
lib.export_library(output_file)
return lib, bytecode | Convert ONNX model to tvm lib. Args: onnx_model (Union[str, onnx.ModelProto]): ONNX model or model path output_file (str): output library path use_vm (bool, optional): Enable tvm virtual machine runtime. Defaults to False. bytecode_file (str, optional): output bytecode path for virtual machine. Defaults to ''. shape (Optional[Dict], optional): The input shape directory. Defaults to None. dtype (Union[str, Dict], optional): The input data type dictionary. Defaults to 'float32'. tuner (Optional[Union[TVMTunerBase, Dict]], optional): The tuner config. Defaults to None. Return: lib: The converted tvm lib bytecode: The bytecode of virtual machine runtime. None if use_vm==False. Examples: >>> from mmdeploy.backend.tvm import from_onnx >>> onnx_path = 'model.onnx' >>> output_file = 'model.so' >>> shape = {'input':[1,3,224,224]} >>> dtype = {'input':'float32'} >>> from_onnx(onnx_path, output_file, shape=shape, dtype=dtype) |
188,806 | import math
import os.path as osp
import mmengine
from mmdeploy.utils import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `update_sdk_pipeline` function. Write a Python function `def update_sdk_pipeline(work_dir: str)` to solve the following problem:
Update pipeline.json for Ascend. Args: work_dir (str): The work directory to load/save the pipeline.json
Here is the function:
def update_sdk_pipeline(work_dir: str):
"""Update pipeline.json for Ascend.
Args:
        work_dir (str): The work directory to load/save the pipeline.json
"""
logger = get_root_logger()
def _try_ori_agnostic_pad(transforms):
trans_resize = None
trans_pad = None
for trans in transforms:
if trans['type'] == 'Resize' and trans.get('keep_ratio', False):
trans_resize = trans
elif trans['type'] == 'Pad' and trans.get('size_divisor',
None) is not None:
trans_pad = trans
if trans_resize is not None and trans_pad is not None:
logger.info('update Pad transform.')
size = trans_resize['size']
divisor = trans_pad['size_divisor']
size = tuple(int(math.ceil(s / divisor) * divisor) for s in size)
trans_pad['size'] = size
trans_pad['orientation_agnostic'] = True
trans_pad.pop('size_divisor')
pipeline_path = osp.join(work_dir, 'pipeline.json')
if osp.exists(pipeline_path):
pipeline = mmengine.load(pipeline_path)
tasks = pipeline['pipeline'].get('tasks', [])
for task in tasks:
if task.get('module', '') == 'Transform':
transforms = task['transforms']
_try_ori_agnostic_pad(transforms)
        mmengine.dump(pipeline, pipeline_path, sort_keys=False, indent=4) | Update pipeline.json for Ascend. Args: work_dir (str): The work directory to load/save the pipeline.json
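Usage is a single call after SDK export (directory name illustrative):
update_sdk_pipeline('work_dir')  # rewrites work_dir/pipeline.json in place, if present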
188,807 | import os.path as osp
import tempfile
from subprocess import call
from typing import Dict, Sequence, Union
import onnx
from mmdeploy.utils import get_root_logger
def make_shape_string(name, dims):
return f'{name}:{",".join(map(str, dims))}'
def _concat(dims: Sequence) -> str:
return ';'.join([','.join(map(str, x)) for x in dims])
The provided code snippet includes necessary dependencies for implementing the `from_onnx` function. Write a Python function `def from_onnx(onnx_model: Union[onnx.ModelProto, str], work_dir: str, model_inputs: Dict)` to solve the following problem:
Convert ONNX to Ascend model. Example: >>> from mmdeploy.apis.ascend import from_onnx >>> onnx_path = 'work_dir/end2end.onnx' >>> work_dir = 'work_dir' >>> model_inputs = mmengine.Config( >>> dict(input_shapes=dict(input=[1, 3, 224, 224]))) >>> from_onnx(onnx_path, work_dir, model_inputs) Args: onnx_model (onnx.ModelProto|str): The onnx model or the path to it. work_dir (str): Path to load onnx and save model. model_inputs (Dict): The input args to the atc tools.
Here is the function:
def from_onnx(onnx_model: Union[onnx.ModelProto, str], work_dir: str,
model_inputs: Dict):
"""Convert ONNX to Ascend model.
Example:
>>> from mmdeploy.apis.ascend import from_onnx
    >>> onnx_path = 'work_dir/end2end.onnx'
    >>> work_dir = 'work_dir'
>>> model_inputs = mmengine.Config(
>>> dict(input_shapes=dict(input=[1, 3, 224, 224])))
>>> from_onnx(onnx_path, work_dir, model_inputs)
Args:
        onnx_model (onnx.ModelProto|str): The onnx model or the path to it.
work_dir (str): Path to load onnx and save model.
model_inputs (Dict): The input args to the atc tools.
"""
logger = get_root_logger()
if not isinstance(onnx_model, str):
onnx_path = tempfile.NamedTemporaryFile(suffix='.onnx').name
onnx.save(onnx_model, onnx_path)
else:
onnx_path = onnx_model
onnx_model = onnx.load(onnx_path)
for n in onnx_model.graph.node:
if n.domain != '':
n.domain = ''
for i in range(1, len(onnx_model.opset_import)):
onnx_model.opset_import.pop(i)
onnx.save(onnx_model, onnx_path)
output_path = osp.join(work_dir, osp.splitext(osp.split(onnx_path)[1])[0])
input_shapes = []
for name, dims in model_inputs['input_shapes'].items():
input_shapes.append(make_shape_string(name, dims))
input_shapes = ';'.join(input_shapes)
input_format = 'ND' if 'dynamic_dims' in model_inputs else 'NCHW'
args = [
f'--model={onnx_path}', '--framework=5', f'--output={output_path}',
'--soc_version=Ascend310', f'--input_format={input_format}',
f'--input_shape={input_shapes}'
]
if 'dynamic_batch_size' in model_inputs:
dynamic_batch_size = ','.join(
map(str, model_inputs['dynamic_batch_size']))
args.append(f'--dynamic_batch_size={dynamic_batch_size}')
elif 'dynamic_image_size' in model_inputs:
dynamic_image_size = _concat(model_inputs['dynamic_image_size'])
args.append(f'--dynamic_image_size={dynamic_image_size}')
elif 'dynamic_dims' in model_inputs:
dynamic_dims = _concat(model_inputs['dynamic_dims'])
args.append(f'--dynamic_dims={dynamic_dims}')
logger.info(' '.join(('atc', *args)))
ret_code = call(['atc', *args])
    assert ret_code == 0 | Convert ONNX to Ascend model. Example: >>> from mmdeploy.apis.ascend import from_onnx >>> onnx_path = 'work_dir/end2end.onnx' >>> work_dir = 'work_dir' >>> model_inputs = mmengine.Config( >>> dict(input_shapes=dict(input=[1, 3, 224, 224]))) >>> from_onnx(onnx_path, work_dir, model_inputs) Args: onnx_model (onnx.ModelProto|str): The onnx model or the path to it. work_dir (str): Path to load onnx and save model. model_inputs (Dict): The input args to the atc tools.
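A sketch with dynamic batch sizes; when `dynamic_batch_size` is given, the batch dimension is typically left dynamic with -1 (values illustrative):
model_inputs = dict(
    input_shapes=dict(input=[-1, 3, 224, 224]),
    dynamic_batch_size=[1, 2, 4])
from_onnx('work_dir/end2end.onnx', 'work_dir', model_inputs)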
188,808 | import os
from contextlib import contextmanager
from typing import Dict, List, NamedTuple, Sequence
import acl
import numpy as np
import torch
from mmdeploy.utils import Backend
from mmdeploy.utils.timer import TimeCounter
from ..base import BACKEND_WRAPPER, BaseWrapper
class Error(Exception):
"""Acl Exception."""
pass
The provided code snippet includes necessary dependencies for implementing the `_check` function. Write a Python function `def _check(code: int, msg: str)` to solve the following problem:
check the error code. Args: code (int): The error code. msg (str): Error message.
Here is the function:
def _check(code: int, msg: str):
"""check the error code.
Args:
code (int): The error code.
msg (str): Error message.
"""
if code != 0:
raise Error(msg, code) | check the error code. Args: code (int): The error code. msg (str): Error message. |
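A sketch of the intended use around pyACL calls, where `acl.init` returns an int status code:
ret = acl.init()
_check(ret, 'acl.init failed')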
188,809 | from typing import Dict, Optional, Sequence, Union
import coremltools as ct
import torch
from mmdeploy.utils import get_root_logger
def get_model_suffix(convert_to: str) -> str:
assert convert_to == 'neuralnetwork' or convert_to == 'mlprogram'
suffix = ''
if convert_to == 'neuralnetwork':
suffix = '.mlmodel'
if convert_to == 'mlprogram':
suffix = '.mlpackage'
return suffix
def create_shape(name: str, input_shapes: Dict) -> ct.Shape:
"""Create input shape."""
min_shape = input_shapes['min_shape']
max_shape = input_shapes['max_shape']
default_shape = input_shapes['default_shape']
assert len(min_shape) == len(max_shape) == len(default_shape)
shape = []
n_dim = len(min_shape)
for i in range(n_dim):
low = min_shape[i]
high = max_shape[i]
assert low <= high
if low == -1 or high == -1:
shape.append(ct.RangeDim())
elif low == high:
shape.append(low)
else:
shape.append(ct.RangeDim(low, high))
shape = ct.Shape(shape=shape, default=default_shape)
return ct.TensorType(shape=shape, name=name)
The provided code snippet includes necessary dependencies for implementing the `from_torchscript` function. Write a Python function `def from_torchscript(torchscript_model: Union[str, torch.jit.RecursiveScriptModule], output_file_prefix: str, input_names: Sequence[str], output_names: Sequence[str], input_shapes: Dict[str, Dict], compute_precision: str = 'FLOAT32', convert_to: str = 'neuralnetwork', minimum_deployment_target: Optional[str] = None, skip_model_load: bool = True, **kwargs)` to solve the following problem:
Create a coreml engine from torchscript. Args: torchscript_model (Union[str, torch.jit.RecursiveScriptModule]): The torchscript model to be converted. output_file_prefix (str): The output file prefix. input_names (Sequence[str]): The input names of the model. output_names (Sequence[str]): The output names of the model. input_shapes (Dict): The input shapes include max_shape, min_shape and default_shape compute_precision (str): The model precision, FLOAT16 or FLOAT32, see coremltools.precision, default `FLOAT32`. convert_to (str): The converted model type, can be 'neuralnetwork' or 'mlprogram'. Defaults to 'neuralnetwork'. minimum_deployment_target (str, optional): minimum deploy target. iOS15, iOS16, etc., see coremltools.target skip_model_load (bool, optional): Skip model load. Defaults to True.
Here is the function:
def from_torchscript(torchscript_model: Union[str,
torch.jit.RecursiveScriptModule],
output_file_prefix: str,
input_names: Sequence[str],
output_names: Sequence[str],
input_shapes: Dict[str, Dict],
compute_precision: str = 'FLOAT32',
convert_to: str = 'neuralnetwork',
minimum_deployment_target: Optional[str] = None,
skip_model_load: bool = True,
**kwargs):
"""Create a coreml engine from torchscript.
Args:
torchscript_model (Union[str, torch.jit.RecursiveScriptModule]):
The torchscript model to be converted.
output_file_prefix (str): The output file prefix.
input_names (Sequence[str]): The input names of the model.
output_names (Sequence[str]): The output names of the model.
input_shapes (Dict): The input shapes include max_shape, min_shape and
default_shape
compute_precision (str): The model precision,
FLOAT16 or FLOAT32, see coremltools.precision, default `FLOAT32`.
convert_to (str): The converted model type, can be
'neuralnetwork' or 'mlprogram'. Defaults to 'neuralnetwork'.
minimum_deployment_target (str, optional): minimum deploy target.
iOS15, iOS16, etc., see coremltools.target
skip_model_load (bool, optional): Skip model load. Defaults to True.
"""
try:
from mmdeploy.backend.torchscript import get_ops_path
torch.ops.load_library(get_ops_path())
except Exception as e:
get_root_logger().warning(
'Can not load custom ops because:\n'
f'{e}\n'
'Some model might not be able to be converted.')
if isinstance(torchscript_model, str):
torchscript_model = torch.jit.load(torchscript_model)
inputs = []
outputs = []
for name in input_names:
shape = create_shape(name, input_shapes[name])
inputs.append(shape)
for name in output_names:
outputs.append(ct.TensorType(name=name))
if convert_to == 'neuralnetwork':
compute_precision = None
else:
compute_precision = ct.precision[compute_precision]
mlmodel = ct.convert(
model=torchscript_model,
inputs=inputs,
outputs=outputs,
compute_precision=compute_precision,
convert_to=convert_to,
minimum_deployment_target=ct.target[minimum_deployment_target]
if minimum_deployment_target else None,
skip_model_load=skip_model_load)
suffix = get_model_suffix(convert_to)
output_path = output_file_prefix + suffix
mlmodel.save(output_path) | Create a coreml engine from torchscript. Args: torchscript_model (Union[str, torch.jit.RecursiveScriptModule]): The torchscript model to be converted. output_file_prefix (str): The output file prefix. input_names (Sequence[str]): The input names of the model. output_names (Sequence[str]): The output names of the model. input_shapes (Dict): The input shapes include max_shape, min_shape and default_shape compute_precision (str): The model precision, FLOAT16 or FLOAT32, see coremltools.precision, default `FLOAT32`. convert_to (str): The converted model type, can be 'neuralnetwork' or 'mlprogram'. Defaults to 'neuralnetwork'. minimum_deployment_target (str, optional): minimum deploy target. iOS15, iOS16, etc., see coremltools.target skip_model_load (bool, optional): Skip model load. Defaults to True. |
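A usage sketch; paths, shapes, and the deployment target are illustrative:
input_shapes = dict(
    input=dict(
        min_shape=[1, 3, 192, 192],
        max_shape=[1, 3, 640, 640],
        default_shape=[1, 3, 224, 224]))
from_torchscript(
    'end2end.torchscript',
    'end2end',
    input_names=['input'],
    output_names=['output'],
    input_shapes=input_shapes,
    compute_precision='FLOAT16',
    convert_to='mlprogram',
    minimum_deployment_target='iOS16')
# writes end2end.mlpackage next to the given prefix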
188,810 | from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.frontend.torch.ops import _get_inputs
from coremltools.converters.mil.frontend.torch.torch_op_registry import \
register_torch_op
The provided code snippet includes necessary dependencies for implementing the `coreml_nms` function. Write a Python function `def coreml_nms(context, node)` to solve the following problem:
bind CoreML NMS op.
Here is the function:
def coreml_nms(context, node):
"""bind CoreML NMS op."""
inputs = _get_inputs(context, node)
boxes = inputs[0]
scores = inputs[1]
iou_threshold = inputs[2]
score_threshold = inputs[3]
max_boxes = inputs[4]
results = mb.non_maximum_suppression(
boxes=boxes,
scores=scores,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
max_boxes=max_boxes)
context.add(tuple(results), torch_name=node.outputs[0]) | bind CoreML NMS op. |
188,811 | from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.frontend.torch.ops import _get_inputs
from coremltools.converters.mil.frontend.torch.torch_op_registry import \
register_torch_op
def stack(context, node):
inputs = _get_inputs(context, node)
values = inputs[0]
if len(inputs) < 2:
axis = 0
else:
axis = inputs[1]
if hasattr(axis, 'val'):
axis = axis.val
if axis < 0:
val_dim = len(values[0].shape)
axis = axis + val_dim + 1
res = mb.stack(values=values, axis=axis, name=node.name)
context.add(res) | null |
188,812 | from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.frontend.torch.ops import _get_inputs
from coremltools.converters.mil.frontend.torch.torch_op_registry import \
register_torch_op
The provided code snippet includes necessary dependencies for implementing the `roi_align` function. Write a Python function `def roi_align(context, node)` to solve the following problem:
roi align.
Here is the function:
def roi_align(context, node):
"""roi align."""
inputs = _get_inputs(context, node)
x = context[node.inputs[0]]
input_shape = x.shape # (B, C, h_in, w_in)
if len(input_shape) != 4:
raise ValueError(
'"CropResize" op: expected input rank 4, got {}'.format(x.rank))
const_box_info = True
if context[node.inputs[1]].val is None or context[
node.inputs[2]].val is None:
const_box_info = False
extrapolation_value = context[node.inputs[2]].val
# CoreML index information along with boxes
if const_box_info:
boxes = context[node.inputs[1]].val
# CoreML expects boxes/ROI in
# [N, 1, 5, 1, 1] format
boxes = boxes.reshape(boxes.shape[0], 1, boxes.shape[1], 1, 1)
else:
boxes = inputs[1]
boxes = mb.reshape(
x=boxes, shape=[boxes.shape[0], 1, boxes.shape[1], 1, 1])
# Get Height and Width of crop
h_out = inputs[3]
w_out = inputs[4]
# Torch input format: [B, C, h_in, w_in]
# CoreML input format: [B, C, h_in, w_in]
# Crop Resize
x = mb.crop_resize(
x=x,
roi=boxes,
target_height=h_out.val,
target_width=w_out.val,
normalized_coordinates=False,
spatial_scale=extrapolation_value,
box_coordinate_mode='CORNERS_WIDTH_FIRST',
sampling_mode='OFFSET_CORNERS',
)
# CoreML output format: [N, 1, C, h_out, w_out]
# Torch output format: [N, C, h_out, w_out]
x = mb.squeeze(x=x, axes=[1])
context.add(x, torch_name=node.outputs[0]) | roi align. |
188,813 | import os.path as osp
from typing import Dict, Union
import mmengine
import onnx
from mmdeploy.utils import (get_calib_filename, get_common_config,
get_model_inputs, load_config, parse_device_id)
from mmdeploy.utils.config_utils import get_ir_config
from .utils import from_onnx, get_trt_log_level
def get_ir_config(deploy_cfg: Union[str, mmengine.Config]) -> Dict:
"""Get the IR parameters in export() from config.
Args:
deploy_cfg (str | mmengine.Config): The path or content of config.
Returns:
Dict: The config dictionary of IR parameters
"""
deploy_cfg = load_config(deploy_cfg)[0]
ir_config = deploy_cfg.get('ir_config', None)
if ir_config is None:
# TODO: deprecate in future
ir_config = deploy_cfg.get('onnx_config', {})
return ir_config
def from_onnx(onnx_model: Union[str, onnx.ModelProto],
output_file_prefix: str,
input_shapes: Dict[str, Sequence[int]],
max_workspace_size: int = 0,
fp16_mode: bool = False,
int8_mode: bool = False,
int8_param: Optional[dict] = None,
device_id: int = 0,
log_level: trt.Logger.Severity = trt.Logger.ERROR,
**kwargs) -> trt.ICudaEngine:
"""Create a tensorrt engine from ONNX.
Args:
onnx_model (str or onnx.ModelProto): Input onnx model to convert from.
output_file_prefix (str): The path to save the output ncnn file.
input_shapes (Dict[str, Sequence[int]]): The min/opt/max shape of
each input.
max_workspace_size (int): To set max workspace size of TensorRT engine.
some tactics and layers need large workspace. Defaults to `0`.
fp16_mode (bool): Specifying whether to enable fp16 mode.
Defaults to `False`.
int8_mode (bool): Specifying whether to enable int8 mode.
Defaults to `False`.
int8_param (dict): A dict of parameter int8 mode. Defaults to `None`.
device_id (int): Choice the device to create engine. Defaults to `0`.
log_level (trt.Logger.Severity): The log level of TensorRT. Defaults to
`trt.Logger.ERROR`.
Returns:
tensorrt.ICudaEngine: The TensorRT engine created from onnx_model.
Example:
>>> from mmdeploy.apis.tensorrt import from_onnx
>>> engine = from_onnx(
>>> "onnx_model.onnx",
>>> {'input': {"min_shape" : [1, 3, 160, 160],
>>> "opt_shape" : [1, 3, 320, 320],
>>> "max_shape" : [1, 3, 640, 640]}},
>>> log_level=trt.Logger.WARNING,
>>> fp16_mode=True,
>>> max_workspace_size=1 << 30,
>>> device_id=0)
>>> })
"""
if int8_mode or device_id != 0:
import pycuda.autoinit # noqa:F401
if device_id != 0:
import os
old_cuda_device = os.environ.get('CUDA_DEVICE', None)
        os.environ['CUDA_DEVICE'] = str(device_id)
        # initialize the CUDA context on the requested device, then restore
        # the original environment below
        import pycuda.autoinit  # noqa:F401
if old_cuda_device is not None:
os.environ['CUDA_DEVICE'] = old_cuda_device
else:
os.environ.pop('CUDA_DEVICE')
# build a mmdeploy logger
logger = get_root_logger()
load_tensorrt_plugin()
# build a tensorrt logger
trt_logger = trt.Logger(log_level)
# create builder and network
builder = trt.Builder(trt_logger)
# TODO: use TorchAllocator as builder.gpu_allocator
EXPLICIT_BATCH = 1 << (int)(
trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(EXPLICIT_BATCH)
# parse onnx
parser = trt.OnnxParser(network, trt_logger)
if isinstance(onnx_model, str):
parse_valid = parser.parse_from_file(onnx_model)
elif isinstance(onnx_model, onnx.ModelProto):
parse_valid = parser.parse(onnx_model.SerializeToString())
else:
raise TypeError('Unsupported onnx model type!')
if not parse_valid:
error_msgs = ''
for error in range(parser.num_errors):
error_msgs += f'{parser.get_error(error)}\n'
raise RuntimeError(f'Failed to parse onnx, {error_msgs}')
# config builder
if version.parse(trt.__version__) < version.parse('8'):
builder.max_workspace_size = max_workspace_size
config = builder.create_builder_config()
if hasattr(config, 'set_memory_pool_limit'):
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE,
max_workspace_size)
else:
config.max_workspace_size = max_workspace_size
cuda_version = search_cuda_version()
if cuda_version is not None:
version_major = int(cuda_version.split('.')[0])
if version_major < 11:
            # cublasLt is only available from CUDA 11, so disable the CUBLAS_LT tactic source on older toolkits  # noqa E501
tactic_source = config.get_tactic_sources() - (
1 << int(trt.TacticSource.CUBLAS_LT))
config.set_tactic_sources(tactic_source)
profile = builder.create_optimization_profile()
for input_name, param in input_shapes.items():
min_shape = param['min_shape']
opt_shape = param['opt_shape']
max_shape = param['max_shape']
profile.set_shape(input_name, min_shape, opt_shape, max_shape)
if config.add_optimization_profile(profile) < 0:
logger.warning(f'Invalid optimization profile {profile}.')
if fp16_mode:
if not getattr(builder, 'platform_has_fast_fp16', True):
            logger.warning('Platform does not have fast native fp16.')
if version.parse(trt.__version__) < version.parse('8'):
builder.fp16_mode = fp16_mode
config.set_flag(trt.BuilderFlag.FP16)
if int8_mode:
if not getattr(builder, 'platform_has_fast_int8', True):
            logger.warning('Platform does not have fast native int8.')
from .calib_utils import HDF5Calibrator
config.set_flag(trt.BuilderFlag.INT8)
assert int8_param is not None
config.int8_calibrator = HDF5Calibrator(
int8_param['calib_file'],
input_shapes,
model_type=int8_param['model_type'],
device_id=device_id,
algorithm=int8_param.get(
'algorithm', trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2))
if version.parse(trt.__version__) < version.parse('8'):
builder.int8_mode = int8_mode
builder.int8_calibrator = config.int8_calibrator
# create engine
if hasattr(builder, 'build_serialized_network'):
engine = builder.build_serialized_network(network, config)
else:
engine = builder.build_engine(network, config)
assert engine is not None, 'Failed to create TensorRT engine'
save(engine, output_file_prefix + '.engine')
return engine
def get_trt_log_level() -> trt.Logger.Severity:
"""Get tensorrt log level from root logger.
Returns:
level (tensorrt.Logger.Severity):
Logging level of tensorrt.Logger.
"""
logger = get_root_logger()
level = logger.level
trt_log_level = trt.Logger.INFO
if level == logging.ERROR:
trt_log_level = trt.Logger.ERROR
elif level == logging.WARNING:
trt_log_level = trt.Logger.WARNING
elif level == logging.DEBUG:
trt_log_level = trt.Logger.VERBOSE
return trt_log_level
The provided code snippet includes necessary dependencies for implementing the `onnx2tensorrt` function. Write a Python function `def onnx2tensorrt(work_dir: str, save_file: str, model_id: int, deploy_cfg: Union[str, mmengine.Config], onnx_model: Union[str, onnx.ModelProto], device: str = 'cuda:0', partition_type: str = 'end2end', **kwargs)` to solve the following problem:
Convert ONNX to TensorRT. Examples: >>> from mmdeploy.backend.tensorrt.onnx2tensorrt import onnx2tensorrt >>> work_dir = 'work_dir' >>> save_file = 'end2end.engine' >>> model_id = 0 >>> deploy_cfg = ('configs/mmdet/detection/' 'detection_tensorrt_dynamic-320x320-1344x1344.py') >>> onnx_model = 'work_dir/end2end.onnx' >>> onnx2tensorrt(work_dir, save_file, model_id, deploy_cfg, onnx_model, 'cuda:0') Args: work_dir (str): A working directory. save_file (str): The base name of the file to save TensorRT engine. E.g. `end2end.engine`. model_id (int): Index of input model. deploy_cfg (str | mmengine.Config): Deployment config. onnx_model (str | onnx.ModelProto): input onnx model. device (str): A string specifying cuda device, defaults to 'cuda:0'. partition_type (str): Specifying partition type of a model, defaults to 'end2end'.
Here is the function:
def onnx2tensorrt(work_dir: str,
save_file: str,
model_id: int,
deploy_cfg: Union[str, mmengine.Config],
onnx_model: Union[str, onnx.ModelProto],
device: str = 'cuda:0',
partition_type: str = 'end2end',
**kwargs):
"""Convert ONNX to TensorRT.
Examples:
>>> from mmdeploy.backend.tensorrt.onnx2tensorrt import onnx2tensorrt
>>> work_dir = 'work_dir'
>>> save_file = 'end2end.engine'
>>> model_id = 0
>>> deploy_cfg = ('configs/mmdet/detection/'
'detection_tensorrt_dynamic-320x320-1344x1344.py')
>>> onnx_model = 'work_dir/end2end.onnx'
>>> onnx2tensorrt(work_dir, save_file, model_id, deploy_cfg,
onnx_model, 'cuda:0')
Args:
work_dir (str): A working directory.
save_file (str): The base name of the file to save TensorRT engine.
E.g. `end2end.engine`.
model_id (int): Index of input model.
deploy_cfg (str | mmengine.Config): Deployment config.
onnx_model (str | onnx.ModelProto): input onnx model.
device (str): A string specifying cuda device, defaults to 'cuda:0'.
partition_type (str): Specifying partition type of a model, defaults to
'end2end'.
"""
# load deploy_cfg if necessary
deploy_cfg = load_config(deploy_cfg)[0]
mmengine.mkdir_or_exist(osp.abspath(work_dir))
common_params = get_common_config(deploy_cfg)
model_params = get_model_inputs(deploy_cfg)[model_id]
final_params = common_params
final_params.update(model_params)
int8_param = final_params.get('int8_param', dict())
calib_file = get_calib_filename(deploy_cfg)
if calib_file is not None:
int8_param['calib_file'] = osp.join(work_dir, calib_file)
int8_param['model_type'] = partition_type
ir_config = get_ir_config(deploy_cfg)
input_names = ir_config.get('input_names', [])
input_shapes = final_params['input_shapes']
if not isinstance(input_shapes, Dict):
input_shapes = dict(zip(input_names, input_shapes))
assert device.startswith('cuda'), f'TensorRT requires cuda device, \
but given: {device}'
device_id = parse_device_id(device)
assert save_file.endswith(
'.engine'
), 'Expect save file ends with `.engine`.' f' but get {save_file}'
save_path = osp.join(work_dir, save_file)
from_onnx(
onnx_model,
osp.splitext(save_path)[0],
input_shapes=input_shapes,
log_level=get_trt_log_level(),
fp16_mode=final_params.get('fp16_mode', False),
int8_mode=final_params.get('int8_mode', False),
int8_param=int8_param,
max_workspace_size=final_params.get('max_workspace_size', 0),
device_id=device_id) | Convert ONNX to TensorRT. Examples: >>> from mmdeploy.backend.tensorrt.onnx2tensorrt import onnx2tensorrt >>> work_dir = 'work_dir' >>> save_file = 'end2end.engine' >>> model_id = 0 >>> deploy_cfg = ('configs/mmdet/detection/' 'detection_tensorrt_dynamic-320x320-1344x1344.py') >>> onnx_model = 'work_dir/end2end.onnx' >>> onnx2tensorrt(work_dir, save_file, model_id, deploy_cfg, onnx_model, 'cuda:0') Args: work_dir (str): A working directory. save_file (str): The base name of the file to save TensorRT engine. E.g. `end2end.engine`. model_id (int): Index of input model. deploy_cfg (str | mmengine.Config): Deployment config. onnx_model (str | onnx.ModelProto): input onnx model. device (str): A string specifying cuda device, defaults to 'cuda:0'. partition_type (str): Specifying partition type of a model, defaults to 'end2end'. |
188,814 | import logging
import os
import re
import sys
from typing import Any, Dict, Optional, Sequence, Union
import onnx
import tensorrt as trt
from packaging import version
from mmdeploy.utils import get_root_logger
from .init_plugins import load_tensorrt_plugin
def load_tensorrt_plugin() -> bool:
"""Load TensorRT plugins library.
Returns:
bool: True if TensorRT plugin library is successfully loaded.
"""
lib_path = get_ops_path()
success = False
logger = get_root_logger()
if os.path.exists(lib_path):
ctypes.CDLL(lib_path)
logger.info(f'Successfully loaded tensorrt plugins from {lib_path}')
success = True
else:
logger.warning(f'Could not load the library of tensorrt plugins. \
Because the file does not exist: {lib_path}')
return success
The provided code snippet includes necessary dependencies for implementing the `load` function. Write a Python function `def load(path: str, allocator: Optional[Any] = None) -> trt.ICudaEngine` to solve the following problem:
Deserialize TensorRT engine from disk. Args: path (str): The disk path to read the engine. allocator (Any): gpu allocator Returns: tensorrt.ICudaEngine: The TensorRT engine loaded from disk.
Here is the function:
def load(path: str, allocator: Optional[Any] = None) -> trt.ICudaEngine:
"""Deserialize TensorRT engine from disk.
Args:
path (str): The disk path to read the engine.
allocator (Any): gpu allocator
Returns:
tensorrt.ICudaEngine: The TensorRT engine loaded from disk.
"""
load_tensorrt_plugin()
with trt.Logger() as logger, trt.Runtime(logger) as runtime:
if allocator is not None:
runtime.gpu_allocator = allocator
with open(path, mode='rb') as f:
engine_bytes = f.read()
trt.init_libnvinfer_plugins(logger, namespace='')
engine = runtime.deserialize_cuda_engine(engine_bytes)
return engine | Deserialize TensorRT engine from disk. Args: path (str): The disk path to read the engine. allocator (Any): gpu allocator Returns: tensorrt.ICudaEngine: The TensorRT engine loaded from disk. |
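A sketch pairing `load` with an execution context (engine path illustrative):
engine = load('work_dir/end2end.engine')
context = engine.create_execution_context()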
188,815 | from typing import Any, Dict, Optional, Sequence, Union
import tensorrt as trt
import torch
from mmdeploy.utils import Backend
from mmdeploy.utils.timer import TimeCounter
from ..base import BACKEND_WRAPPER, BaseWrapper
from .init_plugins import load_tensorrt_plugin
from .torch_allocator import TorchAllocator
from .utils import load
The provided code snippet includes necessary dependencies for implementing the `torch_dtype_from_trt` function. Write a Python function `def torch_dtype_from_trt(dtype: trt.DataType) -> torch.dtype` to solve the following problem:
Convert TensorRT dtype to pytorch dtype. Args: dtype (trt.DataType): The data type in tensorrt. Returns: torch.dtype: The corresponding data type in torch.
Here is the function:
def torch_dtype_from_trt(dtype: trt.DataType) -> torch.dtype:
"""Convert pytorch dtype to TensorRT dtype.
Args:
dtype (str.DataType): The data type in tensorrt.
Returns:
torch.dtype: The corresponding data type in torch.
"""
if dtype == trt.bool:
return torch.bool
elif dtype == trt.int8:
return torch.int8
elif dtype == trt.int32:
return torch.int32
elif dtype == trt.float16:
return torch.float16
elif dtype == trt.float32:
return torch.float32
else:
        raise TypeError(f'{dtype} is not supported by torch') | Convert TensorRT dtype to pytorch dtype. Args: dtype (trt.DataType): The data type in tensorrt. Returns: torch.dtype: The corresponding data type in torch.
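For example, using the same module-level dtype aliases the function compares against:
assert torch_dtype_from_trt(trt.float16) is torch.float16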
188,816 | from typing import Any, Dict, Optional, Sequence, Union
import tensorrt as trt
import torch
from mmdeploy.utils import Backend
from mmdeploy.utils.timer import TimeCounter
from ..base import BACKEND_WRAPPER, BaseWrapper
from .init_plugins import load_tensorrt_plugin
from .torch_allocator import TorchAllocator
from .utils import load
The provided code snippet includes necessary dependencies for implementing the `torch_device_from_trt` function. Write a Python function `def torch_device_from_trt(device: trt.TensorLocation)` to solve the following problem:
Convert TensorRT device to pytorch device. Args: device (trt.TensorLocation): The device in tensorrt. Returns: torch.device: The corresponding device in torch.
Here is the function:
def torch_device_from_trt(device: trt.TensorLocation):
"""Convert pytorch device to TensorRT device.
Args:
device (trt.TensorLocation): The device in tensorrt.
Returns:
torch.device: The corresponding device in torch.
"""
if device == trt.TensorLocation.DEVICE:
return torch.device('cuda')
elif device == trt.TensorLocation.HOST:
return torch.device('cpu')
else:
        raise TypeError(f'{device} is not supported by torch') | Convert TensorRT device to pytorch device. Args: device (trt.TensorLocation): The device in tensorrt. Returns: torch.device: The corresponding device in torch.
188,817 | import os
from mmdeploy.utils import get_file_path
The provided code snippet includes necessary dependencies for implementing the `get_ops_path` function. Write a Python function `def get_ops_path() -> str` to solve the following problem:
Get the library path of onnxruntime custom ops. Returns: str: The library path to onnxruntime custom ops.
Here is the function:
def get_ops_path() -> str:
"""Get the library path of onnxruntime custom ops.
Returns:
str: The library path to onnxruntime custom ops.
"""
candidates = [
'../../lib/libmmdeploy_onnxruntime_ops.so',
'../../lib/mmdeploy_onnxruntime_ops.dll',
]
return get_file_path(os.path.dirname(__file__), candidates) | Get the library path of onnxruntime custom ops. Returns: str: The library path to onnxruntime custom ops. |
188,818 | import os
from mmdeploy.utils import get_file_path
The provided code snippet includes necessary dependencies for implementing the `get_lib_path` function. Write a Python function `def get_lib_path() -> str` to solve the following problem:
Get the library path of onnxruntime. Returns: str: The library path to onnxruntime.
Here is the function:
def get_lib_path() -> str:
"""Get the library path of onnxruntime.
Returns:
str: The library path to onnxruntime.
"""
candidates = [
'../../lib/libonnxruntime.so*',
'../../lib/onnxruntime.dll',
]
return get_file_path(os.path.dirname(__file__), candidates) | Get the library path of onnxruntime. Returns: str: The library path to onnxruntime. |
188,819 | import os.path as osp
def get_ops_path() -> str:
"""Get path of the torchscript extension library.
Returns:
str: A path of the torchscript extension library.
"""
from mmdeploy.utils import get_file_path
candidates = [
'../../lib/libmmdeploy_torchscript_ops.so',
'../../lib/mmdeploy_torchscript_ops.dll',
'../../../build/lib/libmmdeploy_torchscript_ops.so',
'../../../build/bin/*/mmdeploy_torchscript_ops.dll'
]
return get_file_path(osp.dirname(__file__), candidates)
The provided code snippet includes necessary dependencies for implementing the `ops_available` function. Write a Python function `def ops_available() -> bool` to solve the following problem:
Return whether ops are available. Returns: bool: Whether ops are available.
Here is the function:
def ops_available() -> bool:
"""Return whether ops are available.
Returns:
bool: Whether ops are available.
"""
return osp.exists(get_ops_path()) | Return whether ops are available. Returns: bool: Whether ops are available. |
188,820 | import json
from hashlib import sha256
from typing import Dict, List, Tuple
class Context:
"""Trace Context."""
def __init__(self):
self.dtype = None
self.transforms = []
def load(context: Context, args: Dict):
default_args = {'to_float32': False, 'color_type': 'color'}
color_type = args.get('color_type', default_args['color_type'])
if color_type == 'color' or \
color_type == 'color_ignore_orientation':
context.transforms.append({'type': 'cvtColorBGR'})
else:
context.transforms.append({'type': 'cvtColorGray'})
to_float32 = args.get('to_float32', default_args['to_float32'])
if to_float32 is True:
context.transforms.append({'type': 'CastFloat'})
context.dtype = 'float32'
return True | null |
188,821 | import json
from hashlib import sha256
from typing import Dict, List, Tuple
class Context:
"""Trace Context."""
def __init__(self):
self.dtype = None
self.transforms = []
def default_format_bundle(context: Context, args: Dict):
default_args = {'img_to_float': True}
img_to_float = args.get('img_to_float', default_args['img_to_float'])
if img_to_float and (context.dtype is None or context.dtype != 'float32'):
context.transforms.append({'type': 'CastFloat'})
context.dtype = 'float32'
context.transforms.append({'type': 'HWC2CHW'})
return True | null |
188,822 | import json
from hashlib import sha256
from typing import Dict, List, Tuple
class Context:
def __init__(self):
def resize(context: Context, args: Dict):
context.transforms.append({'type': 'Resize'})
return True | null |
188,823 | import json
from hashlib import sha256
from typing import Dict, List, Tuple
class Context:
"""Trace Context."""
def __init__(self):
self.dtype = None
self.transforms = []
def center_crop(context: Context, args: Dict):
context.transforms.append({'type': 'CenterCrop'})
return True | null |
188,824 | import json
from hashlib import sha256
from typing import Dict, List, Tuple
class Context:
"""Trace Context."""
def __init__(self):
self.dtype = None
self.transforms = []
def normalize(context: Context, args: Dict):
default_args = {'to_rgb': True}
if context.dtype is None or context.dtype != 'float32':
context.transforms.append({'type': 'CastFloat'})
context.dtype = 'float32'
to_rgb = args.get('to_rgb', default_args['to_rgb'])
if to_rgb is True:
context.transforms.append({'type': 'cvtColorRGB'})
context.transforms.append({'type': 'Normalize'})
return True | null |
188,825 | import json
from hashlib import sha256
from typing import Dict, List, Tuple
class Context:
"""Trace Context."""
def __init__(self):
self.dtype = None
self.transforms = []
def image_to_tensor(context: Context, args: Dict):
context.transforms.append({'type': 'HWC2CHW'})
return True | null |
188,826 | import json
from hashlib import sha256
from typing import Dict, List, Tuple
class Context:
"""Trace Context."""
def __init__(self):
self.dtype = None
self.transforms = []
def pad(context: Context, args: Dict):
if context.dtype != 'float32':
return False
context.transforms.append({'type': 'Pad'})
return True | null |
188,827 | import json
from hashlib import sha256
from typing import Dict, List, Tuple
def add_transform_tag(pipeline_info: Dict, tag: str) -> Dict:
if tag is None:
return pipeline_info
pipeline_info['pipeline']['tasks'][0]['sha256'] = tag
pipeline_info['pipeline']['tasks'][0]['fuse_transform'] = False
return pipeline_info | null |
188,828 | import json
from hashlib import sha256
from typing import Dict, List, Tuple
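# `TraceFunc` comes from the original mmdeploy module and is not shown here; a
# minimal sketch of the registry it provides (an assumption, added so the
# snippet is self-contained; the per-transform trace functions in the records
# above are registered on `_TRANSFORM_WRAPPER` in the original source):
class TraceFunc:

    def __init__(self):
        self.module_dict = dict()

    def register(self, name: str):

        def _register(func):
            self.module_dict[name] = func
            return func

        return _register

    def get(self, name: str):
        return self.module_dict[name]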
_TRANSFORM_WRAPPER = TraceFunc()
class Context:
"""Trace Context."""
def __init__(self):
self.dtype = None
self.transforms = []
The provided code snippet includes necessary dependencies for implementing the `get_transform_static` function. Write a Python function `def get_transform_static(transforms: List) -> Tuple` to solve the following problem:
Get the static transform information for Elena use. Args: transforms (List): transforms in model_cfg Return: tuple(): Composed of the static transform information and the tag.
Here is the function:
def get_transform_static(transforms: List) -> Tuple:
"""Get the static transform information for Elena use.
Args:
transforms (List): transforms in model_cfg
Return:
tuple(): Composed of the static transform information and the tag.
"""
    # Currently only basic transforms are supported
supported_type = [
'LoadImageFromFile', 'DefaultFormatBundle', 'Resize', 'CenterCrop',
'Normalize', 'ImageToTensor', 'Collect', 'Pad'
]
# each transform can only appear once
cnt = {}
for trans in transforms:
tp = trans['type']
if tp not in supported_type:
return None, None
if tp in cnt:
return None, None
cnt[tp] = 1
context = Context()
for trans in transforms:
tp = trans['type']
if tp == 'Collect':
continue
args = trans
func = _TRANSFORM_WRAPPER.get(tp)
if func(context, args) is False:
return None, None
if context.dtype != 'float32':
return None, None
tag = sha256(json.dumps(context.transforms).encode('utf-8')).hexdigest()
return context.transforms, tag | Get the static transform information for Elena use. Args: transforms (List): transforms in model_cfg Return: tuple(): Composed of the static transform information and the tag. |
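A usage sketch with a typical classification pipeline, assuming the trace functions above are registered on `_TRANSFORM_WRAPPER` as in the original module (values illustrative):
transforms = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', size=(256, -1)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', mean=[123.675, 116.28, 103.53],
         std=[58.395, 57.12, 57.375], to_rgb=True),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]
static_info, tag = get_transform_static(transforms)
# static_info is the fused transform list and tag its sha256; (None, None) is
# returned for unsupported or repeated transform types.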
188,829 | import importlib
import re
from typing import Dict, List, Tuple, Union
import mmengine
from mmdeploy.apis import build_task_processor
from mmdeploy.utils import (Backend, Task, get_backend, get_codebase,
get_ir_config, get_partition_config, get_precision,
get_root_logger, get_task_type, is_dynamic_batch,
load_config)
from mmdeploy.utils.config_utils import get_backend_config
from mmdeploy.utils.constants import SDK_TASK_MAP as task_map
def get_deploy(deploy_cfg: mmengine.Config, model_cfg: mmengine.Config,
work_dir: str, device: str) -> Dict:
"""Get the inference information for pipeline.json.
Args:
deploy_cfg (mmengine.Config): Deploy config dict.
model_cfg (mmengine.Config): The model config dict.
work_dir (str): Work dir to save json files.
device (str): The device passed in.
Return:
dict: Composed of version, task, models and customs.
"""
task = get_task_type(deploy_cfg)
cls_name = task_map[task]['cls_name']
_, customs = get_model_name_customs(
deploy_cfg, model_cfg, work_dir=work_dir, device=device)
version = get_mmdeploy_version()
models = get_models(deploy_cfg, model_cfg, work_dir, device)
return dict(version=version, task=cls_name, models=models, customs=customs)
def get_pipeline(deploy_cfg: mmengine.Config, model_cfg: mmengine.Config,
work_dir: str, device: str) -> Dict:
"""Get the inference information for pipeline.json.
Args:
deploy_cfg (mmengine.Config): Deploy config dict.
model_cfg (mmengine.Config): The model config dict.
work_dir (str): Work dir to save json files.
device (str): The device passed in.
Return:
dict: Composed of input node name, output node name and the tasks.
"""
preprocess = get_preprocess(deploy_cfg, model_cfg, device=device)
infer_info = get_inference_info(
deploy_cfg, model_cfg, work_dir=work_dir, device=device)
postprocess = get_postprocess(
deploy_cfg, model_cfg, work_dir, device=device)
task = get_task_type(deploy_cfg)
input_names = preprocess['input']
output_names = postprocess['output']
if task in [
Task.CLASSIFICATION, Task.SUPER_RESOLUTION, Task.VIDEO_RECOGNITION
]:
postprocess['input'] = infer_info['output']
else:
postprocess['input'] = preprocess['output'] + infer_info['output']
return dict(
pipeline=dict(
input=input_names,
output=output_names,
tasks=[preprocess, infer_info, postprocess]))
def get_detail(deploy_cfg: mmengine.Config, model_cfg: mmengine.Config,
pth: str) -> Dict:
"""Get the detail information for detail.json.
Args:
deploy_cfg (mmengine.Config): Deploy config dict.
model_cfg (mmengine.Config): The model config dict.
pth (str): The checkpoint weight of pytorch model.
Return:
dict: Composed of version, codebase, codebase_config, onnx_config,
backend_config and calib_config.
"""
version = get_mmdeploy_version()
codebase = get_task(deploy_cfg)
codebase['pth'] = pth
codebase['config'] = model_cfg.filename
codebase_config = deploy_cfg.get('codebase_config', dict())
ir_config = get_ir_config(deploy_cfg)
backend_config = deploy_cfg.get('backend_config', dict())
calib_config = deploy_cfg.get('calib_config', dict())
return dict(
version=version,
codebase=codebase,
codebase_config=codebase_config,
onnx_config=ir_config,
backend_config=backend_config,
calib_config=calib_config)
The provided code snippet includes necessary dependencies for implementing the `export2SDK` function. Write a Python function `def export2SDK(deploy_cfg: Union[str, mmengine.Config], model_cfg: Union[str, mmengine.Config], work_dir: str, pth: str, device: str, **kwargs)` to solve the following problem:
Export information to SDK. This function dumps `deploy.json`, `pipeline.json` and `detail.json` to the work dir. Args: deploy_cfg (str | mmengine.Config): Deploy config file or dict. model_cfg (str | mmengine.Config): Model config file or dict. work_dir (str): Work dir to save json files. pth (str): The path of the model checkpoint weights. device (str): The device passed in.
Here is the function:
def export2SDK(deploy_cfg: Union[str, mmengine.Config],
model_cfg: Union[str, mmengine.Config], work_dir: str, pth: str,
device: str, **kwargs):
"""Export information to SDK.
    This function dumps `deploy.json`,
`pipeline.json` and `detail.json` to work dir.
Args:
deploy_cfg (str | mmengine.Config): Deploy config file or dict.
model_cfg (str | mmengine.Config): Model config file or dict.
work_dir (str): Work dir to save json files.
pth (str): The path of the model checkpoint weights.
device (str): The device passed in.
"""
deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
deploy_info = get_deploy(deploy_cfg, model_cfg, work_dir, device)
pipeline_info = get_pipeline(deploy_cfg, model_cfg, work_dir, device)
detail_info = get_detail(deploy_cfg, model_cfg, pth=pth)
mmengine.dump(
deploy_info,
'{}/deploy.json'.format(work_dir),
sort_keys=False,
indent=4)
mmengine.dump(
pipeline_info,
'{}/pipeline.json'.format(work_dir),
sort_keys=False,
indent=4)
mmengine.dump(
detail_info,
'{}/detail.json'.format(work_dir),
sort_keys=False,
indent=4) | Export information to SDK. This function dump `deploy.json`, `pipeline.json` and `detail.json` to work dir. Args: deploy_cfg (str | mmengine.Config): Deploy config file or dict. model_cfg (str | mmengine.Config): Model config file or dict. work_dir (str): Work dir to save json files. pth (str): The path of the model checkpoint weights. device (str): The device passed in. |
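A usage sketch for `export2SDK` with hypothetical config and checkpoint paths; after the call, `work_dir` contains deploy.json, pipeline.json and detail.json:

export2SDK(
    deploy_cfg='configs/mmpretrain/classification_onnxruntime_static.py',
    model_cfg='resnet18_8xb32_in1k.py',
    work_dir='work_dir',
    pth='resnet18.pth',
    device='cpu')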
from typing import Dict, Optional, Union
import mmengine
from packaging import version
from rknn.api import RKNN
from mmdeploy.utils import (get_common_config, get_normalization,
get_onnx_config, get_partition_config,
get_quantization_config, get_rknn_quantization,
get_root_logger, load_config)
from mmdeploy.utils.config_utils import get_backend_config
def rknn_package_info():
"""Get the rknn package information."""
import pkg_resources
toolkit = pkg_resources.working_set.by_key.get('rknn-toolkit', None)
toolkit = pkg_resources.working_set.by_key.get('rknn-toolkit2', toolkit)
if toolkit is None:
return dict(name=None, version=None)
else:
return dict(name=toolkit.project_name, version=toolkit.version)
def get_backend_config(deploy_cfg: Union[str, mmengine.Config]) -> Dict:
"""Get the backend_config from the config.
Args:
deploy_cfg (str | mmengine.Config): The path or content of config.
Returns:
Dict : backend config dict.
"""
deploy_cfg = load_config(deploy_cfg)[0]
backend_config = deploy_cfg.get('backend_config', {})
return backend_config
The provided code snippet includes necessary dependencies for implementing the `onnx2rknn` function. Write a Python function `def onnx2rknn(onnx_model: str, output_path: str, deploy_cfg: Union[str, mmengine.Config], model_cfg: Optional[Union[str, mmengine.Config]] = None, dataset_file: Optional[str] = None, **kwargs)` to solve the following problem:
Convert ONNX to RKNN. RKNN-Toolkit2 is a software development kit for users to perform model conversion, inference and performance evaluation on PC and Rockchip NPU platforms. Args: onnx_model (str): Input onnx model. output_path (str): File path to save RKNN model. deploy_cfg (str | mmengine.Config): The path or content of config. model_cfg (str | mmengine.Config): The path or content of model config. dataset_file (str | None): The dataset file for quantization. Defaults to None.
Here is the function:
def onnx2rknn(onnx_model: str,
output_path: str,
deploy_cfg: Union[str, mmengine.Config],
model_cfg: Optional[Union[str, mmengine.Config]] = None,
dataset_file: Optional[str] = None,
**kwargs):
"""Convert ONNX to RKNN.
RKNN-Toolkit2 is a software development kit for users to perform model
conversion, inference and performance evaluation on PC and Rockchip
NPU platforms.
Args:
onnx_model (str): Input onnx model.
output_path (str): File path to save RKNN model.
deploy_cfg (str | mmengine.Config): The path or content of config.
model_cfg (str | mmengine.Config): The path or content of model config.
        dataset_file (str | None): The dataset file for quantization. Defaults to
None.
"""
logger = get_root_logger()
# load deploy_cfg if necessary
deploy_cfg = load_config(deploy_cfg)[0]
common_params = get_common_config(deploy_cfg)
onnx_params = get_onnx_config(deploy_cfg)
quantization_cfg = get_quantization_config(deploy_cfg)
package_info = rknn_package_info()
input_names = onnx_params.get('input_names', None)
output_names = onnx_params.get('output_names', None)
input_size_list = get_backend_config(deploy_cfg).get(
'input_size_list', None)
    # rknn-toolkit2 1.5+ does not accept explicit input/output info
if package_info['name'] == 'rknn-toolkit2' and version.parse(
package_info['version']) > version.parse('1.4'):
input_names, output_names, input_size_list = [None] * 3
# update norm value
if get_rknn_quantization(deploy_cfg) is True and model_cfg is not None:
transform = get_normalization(model_cfg)
common_params.update(
dict(
mean_values=[transform['mean']],
std_values=[transform['std']]))
# update output_names for partition models
if get_partition_config(deploy_cfg) is not None:
import onnx
_onnx_model = onnx.load(onnx_model)
output_names = [node.name for node in _onnx_model.graph.output]
rknn = RKNN(verbose=True)
rknn.config(**common_params)
ret = rknn.load_onnx(
model=onnx_model,
inputs=input_names,
input_size_list=input_size_list,
outputs=output_names)
if ret != 0:
logger.error('Load model failed!')
exit(ret)
dataset_cfg = quantization_cfg.get('dataset', None)
if dataset_cfg is None:
quantization_cfg.update(dict(dataset=dataset_file))
if dataset_file is None:
quantization_cfg.update(dict(do_quantization=False))
logger.warning('no dataset passed in, quantization is skipped')
if package_info['name'] == 'rknn-toolkit2':
quantization_cfg.pop('pre_compile', None)
ret = rknn.build(**quantization_cfg)
if ret != 0:
logger.error('Build model failed!')
exit(ret)
ret = rknn.export_rknn(output_path)
if ret != 0:
logger.error('Export rknn model failed!')
exit(ret) | Convert ONNX to RKNN. RKNN-Toolkit2 is a software development kit for users to perform model conversion, inference and performance evaluation on PC and Rockchip NPU platforms. Args: onnx_model (str): Input onnx model. output_path (str): File path to save RKNN model. deploy_cfg (str | mmengine.Config): The path or content of config. model_cfg (str | mmengine.Config): The path or content of model config. dataset_file (str | None): The dataset file for quatization. Default to None. |
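A usage sketch for `onnx2rknn` with hypothetical file names; it requires rknn-toolkit or rknn-toolkit2 to be installed, and without a dataset file the model is converted unquantized:

onnx2rknn(
    onnx_model='work_dir/end2end.onnx',
    output_path='work_dir/end2end.rknn',
    deploy_cfg='configs/mmpretrain/classification_rknn_static-224x224.py',
    dataset_file=None)  # quantization is skipped, with a warning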
188,831 | import os.path as osp
import subprocess
import tempfile
from subprocess import PIPE, CalledProcessError, run
from typing import Dict, List, Optional, Sequence, Union
import mmengine
import onnx
from mmdeploy.utils import get_root_logger
from .utils import ModelOptimizerOptions
def get_mo_command() -> str:
"""Checks for possible commands to run Model Optimizer. The following
commands will be tested:
'mo.py' - if you installed OpenVINO using the installer.
'mo' - if you installed OpenVINO with pip.
Returns:
str: Command to run Model Optimizer. If it is not available,
the empty string "" will be returned.
"""
mo_command = ''
mo_commands = ['mo.py', 'mo']
for command in mo_commands:
is_available = True
try:
run(f'{command} -h',
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
shell=True,
check=True)
except CalledProcessError:
is_available = False
if is_available:
mo_command = command
return mo_command
def get_output_model_file(onnx_path: str, work_dir: str) -> str:
"""Returns the path to the .xml file with export result.
Args:
onnx_path (str): The path to the onnx model.
work_dir (str): The path to the directory for saving the results.
Returns:
str: The path to the file where the export result will be located.
"""
mmengine.mkdir_or_exist(osp.abspath(work_dir))
file_name = osp.splitext(osp.split(onnx_path)[1])[0]
model_xml = osp.join(work_dir, file_name + '.xml')
return model_xml
class ModelOptimizerOptions:
"""A class to make it easier to support additional arguments for the Model
Optimizer that can be passed through the deployment configuration.
Example:
>>> deploy_cfg = load_config(deploy_cfg_path)
>>> mo_options = deploy_cfg.get('mo_options', None)
>>> mo_options = ModelOptimizerOptions(mo_options)
>>> mo_args = mo_options.get_options()
"""
def __init__(self,
mo_options: Optional[Dict[str, Union[Dict, List]]] = None):
self.args = ''
self.flags = ''
if mo_options is not None:
self.args = self.__parse_args(mo_options)
self.flags = self.__parse_flags(mo_options)
def __parse_args(self, mo_options: Dict[str, Union[Dict, List]]) -> str:
"""Parses a dictionary with arguments into a string."""
mo_args_str = ''
if 'args' in mo_options:
for key, value in mo_options['args'].items():
value_str = f'"{value}"' if isinstance(value, list) else value
mo_args_str += f'{key}={value_str} '
return mo_args_str
def __parse_flags(self, mo_options: Dict[str, Union[Dict, List]]) -> str:
"""Parses a list with flags into a string."""
mo_flags_str = ''
if 'flags' in mo_options:
mo_flags_str += ' '.join(mo_options['flags'])
return mo_flags_str
def get_options(self) -> str:
"""Returns a string with additional arguments for the Model Optimizer.
If there are no additional arguments, it will return an empty string.
"""
return self.args + self.flags
The provided code snippet includes necessary dependencies for implementing the `from_onnx` function. Write a Python function `def from_onnx(onnx_model: Union[str, onnx.ModelProto], output_file_prefix: str, input_info: Dict[str, Sequence[int]], output_names: Sequence[str], mo_options: Optional[ModelOptimizerOptions] = None)` to solve the following problem:
Convert ONNX to OpenVINO. Examples: >>> from mmdeploy.apis.openvino import from_onnx >>> input_info = {'input': [1,3,800,1344]} >>> output_names = ['dets', 'labels'] >>> onnx_path = 'work_dir/end2end.onnx' >>> output_dir = 'work_dir' >>> from_onnx( onnx_path, output_dir, input_info, output_names) Args: onnx_model (str|ModelProto): The onnx model or its path. output_file_prefix (str): The path to the directory for saving the results. input_info (Dict[str, Sequence[int]]): The shape of each input. output_names (Sequence[str]): Output names. Example: ['dets', 'labels']. mo_options (None | ModelOptimizerOptions): The class with additional arguments for the Model Optimizer.
Here is the function:
def from_onnx(onnx_model: Union[str, onnx.ModelProto],
output_file_prefix: str,
input_info: Dict[str, Sequence[int]],
output_names: Sequence[str],
mo_options: Optional[ModelOptimizerOptions] = None):
"""Convert ONNX to OpenVINO.
Examples:
>>> from mmdeploy.apis.openvino import from_onnx
>>> input_info = {'input': [1,3,800,1344]}
>>> output_names = ['dets', 'labels']
>>> onnx_path = 'work_dir/end2end.onnx'
>>> output_dir = 'work_dir'
>>> from_onnx( onnx_path, output_dir, input_info, output_names)
Args:
onnx_model (str|ModelProto): The onnx model or its path.
output_file_prefix (str): The path to the directory for saving
the results.
input_info (Dict[str, Sequence[int]]):
The shape of each input.
output_names (Sequence[str]): Output names. Example:
['dets', 'labels'].
mo_options (None | ModelOptimizerOptions): The class with
additional arguments for the Model Optimizer.
"""
work_dir = output_file_prefix
input_names = ','.join(input_info.keys())
input_shapes = ','.join(str(list(elem)) for elem in input_info.values())
output = ','.join(output_names)
mo_command = get_mo_command()
is_mo_available = bool(mo_command)
if not is_mo_available:
raise RuntimeError(
'OpenVINO Model Optimizer is not found or configured improperly')
if isinstance(onnx_model, str):
onnx_path = onnx_model
else:
onnx_path = tempfile.NamedTemporaryFile(suffix='.onnx').name
onnx.save(onnx_model, onnx_path)
mo_args = f'--input_model="{onnx_path}" '\
f'--output_dir="{work_dir}" ' \
f'--output="{output}" ' \
f'--input="{input_names}" ' \
f'--input_shape="{input_shapes}" '
if mo_options is not None:
mo_args += mo_options.get_options()
command = f'{mo_command} {mo_args}'
logger = get_root_logger()
logger.info(f'Args for Model Optimizer: {command}')
mo_output = run(command, stdout=PIPE, stderr=PIPE, shell=True, check=True)
logger.info(mo_output.stdout.decode())
logger.debug(mo_output.stderr.decode())
model_xml = get_output_model_file(onnx_path, work_dir)
logger.info(f'Successfully exported OpenVINO model: {model_xml}') | Convert ONNX to OpenVINO. Examples: >>> from mmdeploy.apis.openvino import from_onnx >>> input_info = {'input': [1,3,800,1344]} >>> output_names = ['dets', 'labels'] >>> onnx_path = 'work_dir/end2end.onnx' >>> output_dir = 'work_dir' >>> from_onnx( onnx_path, output_dir, input_info, output_names) Args: onnx_model (str|ModelProto): The onnx model or its path. output_file_prefix (str): The path to the directory for saving the results. input_info (Dict[str, Sequence[int]]): The shape of each input. output_names (Sequence[str]): Output names. Example: ['dets', 'labels']. mo_options (None | ModelOptimizerOptions): The class with additional arguments for the Model Optimizer. |
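A sketch of forwarding extra Model Optimizer arguments through `ModelOptimizerOptions` (the option values below are hypothetical; keys in 'args' keep their leading '--', and list values are quoted automatically by `__parse_args`):

mo_options = ModelOptimizerOptions({
    'args': {'--mean_values': [123.675, 116.28, 103.53]},
    'flags': ['--compress_to_fp16'],
})
from_onnx(
    'work_dir/end2end.onnx', 'work_dir',
    {'input': [1, 3, 800, 1344]}, ['dets', 'labels'],
    mo_options=mo_options)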
188,832 | import os
import os.path as osp
import onnx
import tvm
import tvm.relay as relay
from vacc import quantize
The provided code snippet includes necessary dependencies for implementing the `from_onnx` function. Write a Python function `def from_onnx(onnx_model: str, output_path: str, model_input: dict, model_name: str, **kwargs)` to solve the following problem:
Convert ONNX to VACC. Args: onnx_model (str): Input onnx model. output_path (str): File path to save VACC model. model_input (dict): model input config. model_name (str): model name.
Here is the function:
def from_onnx(onnx_model: str, output_path: str, model_input: dict,
model_name: str, **kwargs):
"""Convert ONNX to VACC.
Args:
onnx_model (str): Input onnx model.
output_path (str): File path to save VACC model.
model_input (dict): model input config.
model_name (str): model name.
"""
target = tvm.target.vacc()
quant_mode = model_input.get('qconfig', {}).get('dtype', 'fp16')
    assert quant_mode in ['int8', 'fp16'], quant_mode + ' is not supported yet'
shape_dict = model_input['shape']
mod, params = relay.frontend.from_onnx(onnx.load(onnx_model), shape_dict)
func = mod['main']
mod = relay.Module.from_expr(func)
if quant_mode == 'int8':
import random
import h5py
data = h5py.File(osp.join(output_path, 'calib_data.h5'),
'r')['calib_data']['input']
calib_data = []
index = list(range(len(data)))
random.shuffle(index)
calib_num = model_input.get('qconfig', {}).get('calib_num', 1000)
for i in index[:calib_num]:
calib_data.append({
list(shape_dict.keys())[0]:
tvm.nd.array(data[str(i)][:].astype('float32'))
})
with quantize.qconfig(
calibrate_mode=model_input.get('qconfig',
{}).get('calibrate_mode',
'percentile'),
skip_conv_layers=model_input.get('qconfig', {}).get(
'skip_conv_layers', []),
weight_scale=model_input.get('qconfig',
{}).get('weight_scale', 'max'),
quantize_per_channel=model_input.get('qconfig', {}).get(
'per_channel', False)):
qmod = quantize.quantize(mod, params, calib_data)
qmod = qmod['main']
mod = relay.Module.from_expr(qmod)
params = None
data_type = 2
else:
data_type = 0
with tvm.build_config(
data_type=data_type,
data_transport_mode=model_input.get('qconfig',
{}).get('data_transmode', 1),
mem_inplace=True,
cluster_mode=model_input.get('qconfig', {}).get('cluster_mode',
0)):
with relay.build_config(
opt_level=2, stream_mode=True, enable_float_to_half=True):
graph, lib, params = relay.build(
mod=mod, target=target, params=params)
save_dir = '-'.join([model_name, quant_mode])
output_root = osp.join(output_path, save_dir)
if not osp.exists(output_root):
os.makedirs(output_root)
libpath = os.path.join(output_root, model_name + '.so')
lib.export_library(libpath)
graph_json_path = os.path.join(output_root, model_name + '.json')
with open(graph_json_path, 'w') as f:
f.write(graph)
param_path = os.path.join(output_root, model_name + '.params')
with open(param_path, 'wb') as f:
f.write(relay.save_param_dict(params))
assert osp.exists(os.path.join(output_root,
model_name + '.params')), 'onnx2vacc failed'
return [
os.path.join(output_root, model_name + '.so'),
os.path.join(output_root, model_name + '.json'),
os.path.join(output_root, model_name + '.params')
] | Convert ONNX to VACC. Args: onnx_model (str): Input onnx model. output_path (str): File path to save VACC model. model_input (dict): model input config. model_name (str): model name. |
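A usage sketch for the VACC `from_onnx` with a hypothetical `model_input` config; fp16 mode needs no calibration data, while int8 additionally expects a calib_data.h5 file under `output_path`:

model_input = dict(
    shape={'input': [1, 3, 224, 224]},
    qconfig=dict(dtype='fp16'))
so_path, json_path, params_path = from_onnx(
    'end2end.onnx', 'work_dir', model_input, 'resnet18')
# artifacts land in work_dir/resnet18-fp16/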
188,833 | import importlib
import logging
from abc import ABCMeta
from typing import Any, Callable, Optional, Sequence
class BaseBackendManager(metaclass=ABCMeta):
"""Abstract interface of backend manager."""
    @classmethod
    def build_wrapper(cls,
backend_files: Sequence[str],
device: str = 'cpu',
input_names: Optional[Sequence[str]] = None,
output_names: Optional[Sequence[str]] = None,
deploy_cfg: Optional[Any] = None,
**kwargs):
"""Build the wrapper for the backend model.
Args:
backend_files (Sequence[str]): Backend files.
device (str, optional): The device info. Defaults to 'cpu'.
input_names (Optional[Sequence[str]], optional): input names.
Defaults to None.
output_names (Optional[Sequence[str]], optional): output names.
Defaults to None.
deploy_cfg (Optional[Any], optional): The deploy config. Defaults
to None.
"""
raise NotImplementedError(
f'build_wrapper has not been implemented for `{cls.__name__}`')
    @classmethod
    def is_available(cls, with_custom_ops: bool = False) -> bool:
"""Check whether backend is installed.
Args:
with_custom_ops (bool): check custom ops exists.
Returns:
bool: True if backend package is installed.
"""
raise NotImplementedError(
f'is_available has not been implemented for "{cls.__name__}"')
    @classmethod
    def get_version(cls) -> str:
"""Get the version of the backend."""
raise NotImplementedError(
f'get_version has not been implemented for "{cls.__name__}"')
    @classmethod
    def check_env(cls, log_callback: Callable = lambda _: _) -> str:
"""Check current environment.
Returns:
str: Info about the environment.
"""
try:
available = cls.is_available()
if available:
try:
backend_version = cls.get_version()
except NotImplementedError:
backend_version = 'Unknown'
else:
backend_version = 'None'
info = f'{cls.backend_name}:\t{backend_version}'
except Exception:
info = f'{cls.backend_name}:\tCheckFailed'
log_callback(info)
return info
    @classmethod
    def to_backend(cls,
ir_files: Sequence[str],
work_dir: str,
deploy_cfg: Any,
log_level: int = logging.INFO,
device: str = 'cpu',
**kwargs) -> Sequence[str]:
"""Convert intermediate representation to given backend.
Args:
ir_files (Sequence[str]): The intermediate representation files.
work_dir (str): The work directory, backend files and logs should
be saved in this directory.
deploy_cfg (Any): The deploy config.
log_level (int, optional): The log level. Defaults to logging.INFO.
device (str, optional): The device type. Defaults to 'cpu'.
Returns:
Sequence[str]: Backend files.
"""
raise NotImplementedError(
f'to_backend has not been implemented for `{cls.__name__}`')
BACKEND_MANAGERS = BackendManagerRegistry()
The provided code snippet includes necessary dependencies for implementing the `get_backend_manager` function. Write a Python function `def get_backend_manager(name: str) -> BaseBackendManager` to solve the following problem:
Get backend manager. Args: name (str): name of the backend. Returns: BaseBackendManager: The backend manager of given name
Here is the function:
def get_backend_manager(name: str) -> BaseBackendManager:
"""Get backend manager.
Args:
name (str): name of the backend.
Returns:
BaseBackendManager: The backend manager of given name
"""
from enum import Enum
if isinstance(name, Enum):
name = name.value
return BACKEND_MANAGERS.find(name) | Get backend manager. Args: name (str): name of the backend. Returns: BaseBackendManager: The backend manager of given name |
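A usage sketch for `get_backend_manager`, assuming a manager named 'onnxruntime' has been registered in BACKEND_MANAGERS:

manager = get_backend_manager('onnxruntime')
print(manager.is_available())  # True if the backend package is importable
print(manager.check_env())     # e.g. 'onnxruntime:\t1.15.1'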