prompt | completion | api
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: skip-file
import argparse
import sys
# pylint: disable=import-error
import resnet.model as resnet_model
# pylint: disable=import-error
import shufflenet.model as snet_model
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import jit
def dump_static_graph(model, graph_name, shape):
model.eval()
data = mge.Tensor(np.ones(shape, dtype=np.uint8))
@jit.trace(capture_as_const=True)
def pred_func(data):
out = data.astype(np.float32)
output_h, output_w = 224, 224
# resize
print(shape)
M = mge.tensor(np.array([[1,0,0], [0,1,0], [0,0,1]], dtype=np.float32))
M_shape = F.concat([data.shape[0],M.shape])
M = F.broadcast_to(M, M_shape)
out = F.vision.warp_perspective(out, M, (output_h, output_w), format='NHWC')
# mean
_mean = mge.Tensor(np.array([103.530, 116.280, 123.675], dtype=np.float32))
out = F.sub(out, _mean)
# div
_div = mge.Tensor(np.array([57.375, 57.120, 58.395], dtype=np.float32))
out = | F.div(out, _div) | megengine.functional.div |
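A minimal, self-contained sketch of the per-channel normalization this row completes; only the `F.sub`/`F.div` calls come from the row itself, and the shapes and values below are illustrative:

```python
import numpy as np
import megengine as mge
import megengine.functional as F

# Stand-in NHWC batch for the decoded image tensor.
x = mge.Tensor(np.random.rand(1, 224, 224, 3).astype(np.float32))
mean = mge.Tensor(np.array([103.530, 116.280, 123.675], dtype=np.float32))
std = mge.Tensor(np.array([57.375, 57.120, 58.395], dtype=np.float32))

# Subtract the per-channel mean, then divide by the per-channel std;
# the (3,) tensors broadcast over the trailing channel axis.
out = F.div(F.sub(x, mean), std)
print(out.shape)  # (1, 224, 224, 3)
```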
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: skip-file
import argparse
import sys
# pylint: disable=import-error
import resnet.model as resnet_model
# pylint: disable=import-error
import shufflenet.model as snet_model
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import jit
def dump_static_graph(model, graph_name, shape):
model.eval()
data = mge.Tensor(np.ones(shape, dtype=np.uint8))
@jit.trace(capture_as_const=True)
def pred_func(data):
out = data.astype(np.float32)
output_h, output_w = 224, 224
# resize
print(shape)
M = mge.tensor(np.array([[1,0,0], [0,1,0], [0,0,1]], dtype=np.float32))
M_shape = F.concat([data.shape[0],M.shape])
M = F.broadcast_to(M, M_shape)
out = F.vision.warp_perspective(out, M, (output_h, output_w), format='NHWC')
# mean
_mean = mge.Tensor(np.array([103.530, 116.280, 123.675], dtype=np.float32))
out = F.sub(out, _mean)
# div
_div = mge.Tensor(np.array([57.375, 57.120, 58.395], dtype=np.float32))
out = F.div(out, _div)
# dimshuffle
out = | F.transpose(out, (0,3,1,2)) | megengine.functional.transpose |
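A hedged sketch of the axis shuffle this row completes, assuming a standard MegEngine install; the shapes are illustrative:

```python
import numpy as np
import megengine as mge
import megengine.functional as F

# NHWC tensor standing in for the normalized image batch.
x = mge.Tensor(np.zeros((1, 224, 224, 3), dtype=np.float32))

# (0, 3, 1, 2) moves the channel axis forward: NHWC -> NCHW,
# the layout convolutional backbones expect.
y = F.transpose(x, (0, 3, 1, 2))
print(y.shape)  # (1, 3, 224, 224)
```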
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = | M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024) | megengine.module.Linear |
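A minimal sketch of what `M.Linear` does here, with a hypothetical `pooling_size` of `(7, 7)` standing in for `cfg.pooling_size` (the real value comes from the config object in the prompt):

```python
import numpy as np
import megengine as mge
import megengine.module as M

pooling_size = (7, 7)  # hypothetical stand-in for cfg.pooling_size
fc1 = M.Linear(256 * pooling_size[0] * pooling_size[1], 1024)

# Flattened ROI features: (num_rois, 256 * 7 * 7) -> (num_rois, 1024).
feat = mge.Tensor(np.zeros((8, 256 * 7 * 7), dtype=np.float32))
print(fc1(feat).shape)  # (8, 1024)
```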
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = | M.Linear(1024, 1024) | megengine.module.Linear |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = | M.Linear(1024, cfg.num_classes + 1) | megengine.module.Linear |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = | M.Linear(1024, (cfg.num_classes + 1) * 4) | megengine.module.Linear |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
| M.init.normal_(self.pred_cls.weight, std=0.01) | megengine.module.init.normal_ |
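A short sketch of the in-place initializers used around this row; the layer sizes are illustrative, not taken from the config:

```python
import megengine.module as M

layer = M.Linear(1024, 81)  # e.g. 80 classes + 1 background

# Both initializers mutate the parameter in place: Gaussian weights
# with a small std, and a constant zero bias.
M.init.normal_(layer.weight, std=0.01)
M.init.fill_(layer.bias, 0)
```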
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
| M.init.normal_(self.pred_delta.weight, std=0.001) | megengine.module.init.normal_ |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = | F.flatten(pool_features, start_axis=1) | megengine.functional.flatten |
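A sketch of the `start_axis=1` flatten this row completes, with illustrative shapes:

```python
import numpy as np
import megengine as mge
import megengine.functional as F

# (N, C, H, W) pooled ROI features; shapes chosen for illustration.
pool_features = mge.Tensor(np.zeros((8, 256, 7, 7), dtype=np.float32))

# start_axis=1 keeps the batch axis and collapses C*H*W into one
# axis, yielding the (N, 256*7*7) matrix the fc layers consume.
flat = F.flatten(pool_features, start_axis=1)
print(flat.shape)  # (8, 12544)
```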
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
_, batch_roi_inds = F.cond_take(batch_roi_mask == 1, batch_roi_mask)
# all_rois : [batch_id, x1, y1, x2, y2]
all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois])
overlaps_normal, overlaps_ignore = layers.get_iou(
all_rois[:, 1:5], gt_boxes_per_img, return_ignore=True,
)
max_overlaps_normal = overlaps_normal.max(axis=1)
gt_assignment_normal = F.argmax(overlaps_normal, axis=1)
max_overlaps_ignore = overlaps_ignore.max(axis=1)
gt_assignment_ignore = F.argmax(overlaps_ignore, axis=1)
ignore_assign_mask = (max_overlaps_normal < self.cfg.fg_threshold) * (
max_overlaps_ignore > max_overlaps_normal
)
max_overlaps = (
max_overlaps_normal * (1 - ignore_assign_mask)
+ max_overlaps_ignore * ignore_assign_mask
)
gt_assignment = (
gt_assignment_normal * (1 - ignore_assign_mask)
+ gt_assignment_ignore * ignore_assign_mask
)
gt_assignment = gt_assignment.astype("int32")
labels = gt_boxes_per_img.ai[gt_assignment, 4]
# ---------------- get the fg/bg labels for each roi ---------------#
fg_mask = (max_overlaps >= self.cfg.fg_threshold) * (
labels != self.cfg.ignore_label
)
bg_mask = (max_overlaps < self.cfg.bg_threshold_high) * (
max_overlaps >= self.cfg.bg_threshold_low
)
num_fg_rois = self.cfg.num_rois * self.cfg.fg_ratio
fg_inds_mask = self._bernoulli_sample_masks(fg_mask, num_fg_rois, 1)
num_bg_rois = self.cfg.num_rois - fg_inds_mask.sum()
bg_inds_mask = self._bernoulli_sample_masks(bg_mask, num_bg_rois, 1)
labels = labels * fg_inds_mask
keep_mask = fg_inds_mask + bg_inds_mask
_, keep_inds = F.cond_take(keep_mask == 1, keep_mask)
# The next line caps keep_inds at num_rois to avoid exceeding memory
keep_inds = keep_inds[: F.minimum(self.cfg.num_rois, keep_inds.shapeof(0))]
# labels
labels = labels.ai[keep_inds].astype("int32")
rois = all_rois.ai[keep_inds]
target_boxes = gt_boxes_per_img.ai[gt_assignment.ai[keep_inds], :4]
bbox_targets = self.box_coder.encode(rois[:, 1:5], target_boxes)
bbox_targets = bbox_targets.reshape(-1, 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
return (
F.zero_grad(F.concat(return_rois, axis=0)),
F.zero_grad(F.concat(return_labels, axis=0)),
F.zero_grad(F.concat(return_bbox_targets, axis=0)),
)
def _bernoulli_sample_masks(self, masks, num_samples, sample_value):
""" Using the bernoulli sampling method"""
sample_mask = masks == sample_value
num_mask = sample_mask.sum()
num_final_samples = | F.minimum(num_mask, num_samples) | megengine.functional.minimum |
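A tiny sketch of the clamp this row completes; the counts are made up:

```python
import numpy as np
import megengine as mge
import megengine.functional as F

num_mask = mge.Tensor(np.array(37, dtype=np.int32))      # candidates available
num_samples = mge.Tensor(np.array(128, dtype=np.int32))  # samples requested

# Elementwise minimum: never draw more samples than the mask contains.
num_final_samples = F.minimum(num_mask, num_samples)
print(num_final_samples)  # 37
```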
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
| M.init.normal_(l.weight, std=0.01) | megengine.module.init.normal_ |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
| M.init.fill_(l.bias, 0) | megengine.module.init.fill_ |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
| M.init.fill_(l.bias, 0) | megengine.module.init.fill_ |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = | F.indexing_one_hot(pred_offsets, vlabels, axis=1) | megengine.functional.indexing_one_hot |
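A hedged sketch of `F.indexing_one_hot` as this row uses it: picking, per ROI, the 4 box offsets of that ROI's class. Shapes mirror the prompt (`(N, num_classes + 1, 4)` offsets and an index broadcast to `(N, 4)`), with toy numbers:

```python
import numpy as np
import megengine as mge
import megengine.functional as F

# 3 ROIs, 2 classes + background, 4 offsets per class.
offsets = mge.Tensor(np.arange(3 * 3 * 4, dtype=np.float32).reshape(3, 3, 4))
# Per-ROI label, repeated over the 4 coordinates (int32, as required).
index = mge.Tensor(np.array([[0] * 4, [2] * 4, [1] * 4], dtype=np.int32))

# Selects along axis 1: output shape (3, 4), one offset row per ROI.
picked = F.indexing_one_hot(offsets, index, axis=1)
print(picked.shape)  # (3, 4)
```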
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# if config.proposal_append_gt:
gt_rois = | F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1) | megengine.functional.concat |
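A minimal sketch of the concat this row completes: prepending a batch-index column to per-image ground-truth boxes. Values are illustrative:

```python
import numpy as np
import megengine as mge
import megengine.functional as F

batch_inds = mge.Tensor(np.zeros((4, 1), dtype=np.float32))  # (G, 1)
boxes = mge.Tensor(np.random.rand(4, 4).astype(np.float32))  # (G, 4)

# axis=1 joins columns: each gt row becomes [batch_id, x1, y1, x2, y2].
gt_rois = F.concat([batch_inds, boxes], axis=1)
print(gt_rois.shape)  # (4, 5)
```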
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
_, batch_roi_inds = | F.cond_take(batch_roi_mask == 1, batch_roi_mask) | megengine.functional.cond_take |
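A sketch of `F.cond_take` as used here, assuming the documented `(values, indices)` return; the mask is a toy example:

```python
import numpy as np
import megengine as mge
import megengine.functional as F

mask = mge.Tensor(np.array([0, 1, 1, 0, 1], dtype=np.int32))

# Returns the taken values and their flat indices; the prompt above
# discards the values and keeps the indices to gather matching ROIs.
vals, inds = F.cond_take(mask == 1, mask)
print(inds)  # [1 2 4]
```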
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
_, batch_roi_inds = F.cond_take(batch_roi_mask == 1, batch_roi_mask)
# all_rois : [batch_id, x1, y1, x2, y2]
all_rois = | F.concat([rpn_rois.ai[batch_roi_inds], gt_rois]) | megengine.functional.concat |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
_, batch_roi_inds = F.cond_take(batch_roi_mask == 1, batch_roi_mask)
# all_rois : [batch_id, x1, y1, x2, y2]
all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois])
overlaps_normal, overlaps_ignore = layers.get_iou(
all_rois[:, 1:5], gt_boxes_per_img, return_ignore=True,
)
max_overlaps_normal = overlaps_normal.max(axis=1)
gt_assignment_normal = | F.argmax(overlaps_normal, axis=1) | megengine.functional.argmax |
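A toy sketch of the per-ROI assignment this row completes: `max` gives the best IoU, `argmax` the index of the gt box achieving it:

```python
import numpy as np
import megengine as mge
import megengine.functional as F

# (num_rois, num_gt) IoU matrix, made-up values.
overlaps = mge.Tensor(np.array([[0.1, 0.7],
                                [0.9, 0.2]], dtype=np.float32))

print(overlaps.max(axis=1))        # [0.7 0.9] best IoU per ROI
print(F.argmax(overlaps, axis=1))  # [1 0] matching gt index per ROI
```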
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
_, batch_roi_inds = F.cond_take(batch_roi_mask == 1, batch_roi_mask)
# all_rois : [batch_id, x1, y1, x2, y2]
all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois])
overlaps_normal, overlaps_ignore = layers.get_iou(
all_rois[:, 1:5], gt_boxes_per_img, return_ignore=True,
)
max_overlaps_normal = overlaps_normal.max(axis=1)
gt_assignment_normal = F.argmax(overlaps_normal, axis=1)
max_overlaps_ignore = overlaps_ignore.max(axis=1)
gt_assignment_ignore = | F.argmax(overlaps_ignore, axis=1) | megengine.functional.argmax |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
_, batch_roi_inds = F.cond_take(batch_roi_mask == 1, batch_roi_mask)
# all_rois : [batch_id, x1, y1, x2, y2]
all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois])
overlaps_normal, overlaps_ignore = layers.get_iou(
all_rois[:, 1:5], gt_boxes_per_img, return_ignore=True,
)
max_overlaps_normal = overlaps_normal.max(axis=1)
gt_assignment_normal = F.argmax(overlaps_normal, axis=1)
max_overlaps_ignore = overlaps_ignore.max(axis=1)
gt_assignment_ignore = F.argmax(overlaps_ignore, axis=1)
ignore_assign_mask = (max_overlaps_normal < self.cfg.fg_threshold) * (
max_overlaps_ignore > max_overlaps_normal
)
max_overlaps = (
max_overlaps_normal * (1 - ignore_assign_mask)
+ max_overlaps_ignore * ignore_assign_mask
)
gt_assignment = (
gt_assignment_normal * (1 - ignore_assign_mask)
+ gt_assignment_ignore * ignore_assign_mask
)
gt_assignment = gt_assignment.astype("int32")
labels = gt_boxes_per_img.ai[gt_assignment, 4]
# ---------------- get the fg/bg labels for each roi ---------------#
fg_mask = (max_overlaps >= self.cfg.fg_threshold) * (
labels != self.cfg.ignore_label
)
bg_mask = (max_overlaps < self.cfg.bg_threshold_high) * (
max_overlaps >= self.cfg.bg_threshold_low
)
num_fg_rois = self.cfg.num_rois * self.cfg.fg_ratio
fg_inds_mask = self._bernoulli_sample_masks(fg_mask, num_fg_rois, 1)
num_bg_rois = self.cfg.num_rois - fg_inds_mask.sum()
bg_inds_mask = self._bernoulli_sample_masks(bg_mask, num_bg_rois, 1)
labels = labels * fg_inds_mask
keep_mask = fg_inds_mask + bg_inds_mask
_, keep_inds = | F.cond_take(keep_mask == 1, keep_mask) | megengine.functional.cond_take |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = | F.softmax(pred_logits, axis=1) | megengine.functional.softmax |
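A short sketch of the inference-time scoring this row completes: softmax over the logits, then dropping column 0 (background). Shapes are illustrative:

```python
import numpy as np
import megengine as mge
import megengine.functional as F

# (N, num_classes + 1) logits; column 0 is the background class.
logits = mge.Tensor(np.random.rand(2, 4).astype(np.float32))

# Row-wise softmax, then slice off the background column.
pred_scores = F.softmax(logits, axis=1)[:, 1:]
print(pred_scores.shape)  # (2, 3)
```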
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# if config.proposal_append_gt:
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
_, batch_roi_inds = F.cond_take(batch_roi_mask == 1, batch_roi_mask)
# all_rois : [batch_id, x1, y1, x2, y2]
all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois])
overlaps_normal, overlaps_ignore = layers.get_iou(
all_rois[:, 1:5], gt_boxes_per_img, return_ignore=True,
)
max_overlaps_normal = overlaps_normal.max(axis=1)
gt_assignment_normal = F.argmax(overlaps_normal, axis=1)
max_overlaps_ignore = overlaps_ignore.max(axis=1)
gt_assignment_ignore = F.argmax(overlaps_ignore, axis=1)
ignore_assign_mask = (max_overlaps_normal < self.cfg.fg_threshold) * (
max_overlaps_ignore > max_overlaps_normal
)
max_overlaps = (
max_overlaps_normal * (1 - ignore_assign_mask)
+ max_overlaps_ignore * ignore_assign_mask
)
gt_assignment = (
gt_assignment_normal * (1 - ignore_assign_mask)
+ gt_assignment_ignore * ignore_assign_mask
)
gt_assignment = gt_assignment.astype("int32")
labels = gt_boxes_per_img.ai[gt_assignment, 4]
# ---------------- get the fg/bg labels for each roi ---------------#
fg_mask = (max_overlaps >= self.cfg.fg_threshold) * (
labels != self.cfg.ignore_label
)
bg_mask = (max_overlaps < self.cfg.bg_threshold_high) * (
max_overlaps >= self.cfg.bg_threshold_low
)
num_fg_rois = self.cfg.num_rois * self.cfg.fg_ratio
fg_inds_mask = self._bernoulli_sample_masks(fg_mask, num_fg_rois, 1)
num_bg_rois = self.cfg.num_rois - fg_inds_mask.sum()
bg_inds_mask = self._bernoulli_sample_masks(bg_mask, num_bg_rois, 1)
labels = labels * fg_inds_mask
keep_mask = fg_inds_mask + bg_inds_mask
_, keep_inds = F.cond_take(keep_mask == 1, keep_mask)
# The next line caps keep_inds at num_rois to avoid exceeding memory
keep_inds = keep_inds[: F.minimum(self.cfg.num_rois, keep_inds.shapeof(0))]
# labels
labels = labels.ai[keep_inds].astype("int32")
rois = all_rois.ai[keep_inds]
target_boxes = gt_boxes_per_img.ai[gt_assignment.ai[keep_inds], :4]
bbox_targets = self.box_coder.encode(rois[:, 1:5], target_boxes)
bbox_targets = bbox_targets.reshape(-1, 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
return (
F.zero_grad( | F.concat(return_rois, axis=0) | megengine.functional.concat |
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# gt boxes are appended to the proposals (config.proposal_append_gt behavior)
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
_, batch_roi_inds = F.cond_take(batch_roi_mask == 1, batch_roi_mask)
# all_rois: [batch_id, x1, y1, x2, y2]
all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois])
overlaps_normal, overlaps_ignore = layers.get_iou(
all_rois[:, 1:5], gt_boxes_per_img, return_ignore=True,
)
max_overlaps_normal = overlaps_normal.max(axis=1)
gt_assignment_normal = F.argmax(overlaps_normal, axis=1)
max_overlaps_ignore = overlaps_ignore.max(axis=1)
gt_assignment_ignore = F.argmax(overlaps_ignore, axis=1)
ignore_assign_mask = (max_overlaps_normal < self.cfg.fg_threshold) * (
max_overlaps_ignore > max_overlaps_normal
)
max_overlaps = (
max_overlaps_normal * (1 - ignore_assign_mask)
+ max_overlaps_ignore * ignore_assign_mask
)
gt_assignment = (
gt_assignment_normal * (1 - ignore_assign_mask)
+ gt_assignment_ignore * ignore_assign_mask
)
gt_assignment = gt_assignment.astype("int32")
labels = gt_boxes_per_img.ai[gt_assignment, 4]
# ---------------- get the fg/bg labels for each roi ---------------#
fg_mask = (max_overlaps >= self.cfg.fg_threshold) * (
labels != self.cfg.ignore_label
)
bg_mask = (max_overlaps < self.cfg.bg_threshold_high) * (
max_overlaps >= self.cfg.bg_threshold_low
)
num_fg_rois = self.cfg.num_rois * self.cfg.fg_ratio
fg_inds_mask = self._bernoulli_sample_masks(fg_mask, num_fg_rois, 1)
num_bg_rois = self.cfg.num_rois - fg_inds_mask.sum()
bg_inds_mask = self._bernoulli_sample_masks(bg_mask, num_bg_rois, 1)
labels = labels * fg_inds_mask
keep_mask = fg_inds_mask + bg_inds_mask
_, keep_inds = F.cond_take(keep_mask == 1, keep_mask)
# cap keep_inds at num_rois to avoid exceeding memory
keep_inds = keep_inds[: F.minimum(self.cfg.num_rois, keep_inds.shapeof(0))]
# labels
labels = labels.ai[keep_inds].astype("int32")
rois = all_rois.ai[keep_inds]
target_boxes = gt_boxes_per_img.ai[gt_assignment.ai[keep_inds], :4]
bbox_targets = self.box_coder.encode(rois[:, 1:5], target_boxes)
bbox_targets = bbox_targets.reshape(-1, 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
return (
F.zero_grad(F.concat(return_rois, axis=0)),
F.zero_grad( | F.concat(return_labels, axis=0) | megengine.functional.concat |
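# --- editor's sketch (illustrative, not part of the original sample) --------
# The ignore-aware assignment above blends "normal" and "ignore" IoUs with a
# 0/1 mask: a roi falls back to its ignore IoU only when it misses the fg
# threshold against normal boxes and overlaps an ignore region more strongly.
# A minimal numpy check of the same arithmetic, with made-up values:
import numpy as np
max_normal = np.array([0.3, 0.7])  # best IoU vs. normal gt boxes
max_ignore = np.array([0.6, 0.2])  # best IoU vs. ignore regions
fg_threshold = 0.5                 # assumed stand-in for cfg.fg_threshold
mask = (max_normal < fg_threshold) * (max_ignore > max_normal)
blended = max_normal * (1 - mask) + max_ignore * mask
assert np.allclose(blended, [0.6, 0.7])
# -----------------------------------------------------------------------------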
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice from index 1 to drop the background class
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per-image proposals and gt_boxes
for bid in range(self.cfg.batch_per_gpu):
num_valid_boxes = im_info[bid, 4]
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = mge.ones((gt_boxes_per_img.shapeof(0), 1)) * bid
# gt boxes are appended to the proposals (config.proposal_append_gt behavior)
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
_, batch_roi_inds = F.cond_take(batch_roi_mask == 1, batch_roi_mask)
# all_rois: [batch_id, x1, y1, x2, y2]
all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois])
overlaps_normal, overlaps_ignore = layers.get_iou(
all_rois[:, 1:5], gt_boxes_per_img, return_ignore=True,
)
max_overlaps_normal = overlaps_normal.max(axis=1)
gt_assignment_normal = F.argmax(overlaps_normal, axis=1)
max_overlaps_ignore = overlaps_ignore.max(axis=1)
gt_assignment_ignore = F.argmax(overlaps_ignore, axis=1)
ignore_assign_mask = (max_overlaps_normal < self.cfg.fg_threshold) * (
max_overlaps_ignore > max_overlaps_normal
)
max_overlaps = (
max_overlaps_normal * (1 - ignore_assign_mask)
+ max_overlaps_ignore * ignore_assign_mask
)
gt_assignment = (
gt_assignment_normal * (1 - ignore_assign_mask)
+ gt_assignment_ignore * ignore_assign_mask
)
gt_assignment = gt_assignment.astype("int32")
labels = gt_boxes_per_img.ai[gt_assignment, 4]
# ---------------- get the fg/bg labels for each roi ---------------#
fg_mask = (max_overlaps >= self.cfg.fg_threshold) * (
labels != self.cfg.ignore_label
)
bg_mask = (max_overlaps < self.cfg.bg_threshold_high) * (
max_overlaps >= self.cfg.bg_threshold_low
)
num_fg_rois = self.cfg.num_rois * self.cfg.fg_ratio
fg_inds_mask = self._bernoulli_sample_masks(fg_mask, num_fg_rois, 1)
num_bg_rois = self.cfg.num_rois - fg_inds_mask.sum()
bg_inds_mask = self._bernoulli_sample_masks(bg_mask, num_bg_rois, 1)
labels = labels * fg_inds_mask
keep_mask = fg_inds_mask + bg_inds_mask
_, keep_inds = F.cond_take(keep_mask == 1, keep_mask)
# cap keep_inds at num_rois to avoid exceeding memory
keep_inds = keep_inds[: F.minimum(self.cfg.num_rois, keep_inds.shapeof(0))]
# labels
labels = labels.ai[keep_inds].astype("int32")
rois = all_rois.ai[keep_inds]
target_boxes = gt_boxes_per_img.ai[gt_assignment.ai[keep_inds], :4]
bbox_targets = self.box_coder.encode(rois[:, 1:5], target_boxes)
bbox_targets = bbox_targets.reshape(-1, 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
return (
F.zero_grad(F.concat(return_rois, axis=0)),
F.zero_grad(F.concat(return_labels, axis=0)),
F.zero_grad( | F.concat(return_bbox_targets, axis=0) | megengine.functional.concat |
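# --- editor's sketch (illustrative, not part of the original sample) --------
# Quota arithmetic behind the fg/bg sampling above: foreground rois are capped
# at num_rois * fg_ratio and background rois fill the remainder, so the kept
# set never exceeds num_rois. With assumed config values:
num_rois, fg_ratio = 512, 0.25       # stand-ins for cfg.num_rois / cfg.fg_ratio
fg_quota = int(num_rois * fg_ratio)  # at most 128 foreground rois
sampled_fg = 90                      # e.g. fewer fg candidates than the quota
bg_quota = num_rois - sampled_fg     # 422 background rois may be sampled
assert sampled_fg + bg_quota == num_rois
# -----------------------------------------------------------------------------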
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
from official.vision.detection import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, (cfg.num_classes + 1) * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for classification
loss_rcnn_cls = layers.softmax_loss(pred_logits, labels)
# loss for regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes + 1, 4)
vlabels = labels.reshape(-1, 1).broadcast((labels.shapeof(0), 4))
pred_offsets = F.indexing_one_hot(pred_offsets, vlabels, axis=1)
loss_rcnn_loc = layers.get_smooth_l1_loss(
pred_offsets,
bbox_targets,
labels,
self.cfg.rcnn_smooth_l1_beta,
norm_type="all",
)
loss_dict = {"loss_rcnn_cls": loss_rcnn_cls, "loss_rcnn_loc": loss_rcnn_loc}
return loss_dict
else:
# slice from index 1 to drop the background class
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof(0), self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = (
| F.add_axis(rcnn_rois[:, 1:5], 1) | megengine.functional.add_axis |
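# --- editor's sketch (illustrative, not part of the original sample) --------
# At inference the head predicts one box per (roi, class), so each roi is
# repeated num_classes times to pair with the flattened per-class offsets.
# The same (N, 4) -> (N, C, 4) -> (N * C, 4) expansion in plain numpy:
import numpy as np
rois = np.arange(8, dtype=np.float32).reshape(2, 4)  # N = 2 rois
num_classes = 3                                      # assumed small C
base = np.broadcast_to(rois[:, None, :], (2, num_classes, 4)).reshape(-1, 4)
assert base.shape == (6, 4) and np.allclose(base[0], base[1])
# -----------------------------------------------------------------------------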
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
from typing import List
import megengine.module as M
from megengine.core import Tensor
from official.vision.detection.layers import basic
class RetinaNetHead(M.Module):
"""
The head used in RetinaNet for object classification and box regression.
"""
def __init__(self, cfg, input_shape: List[basic.ShapeSpec]):
super().__init__()
in_channels = input_shape[0].channels
num_classes = cfg.num_classes
num_convs = 4
prior_prob = cfg.cls_prior_prob
num_anchors = [9, 9, 9, 9, 9]
assert (
len(set(num_anchors)) == 1
), "not support different number of anchors between levels"
num_anchors = num_anchors[0]
cls_subnet = []
bbox_subnet = []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = | M.Sequential(*cls_subnet) | megengine.module.Sequential |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
from typing import List
import megengine.module as M
from megengine.core import Tensor
from official.vision.detection.layers import basic
class RetinaNetHead(M.Module):
"""
The head used in RetinaNet for object classification and box regression.
"""
def __init__(self, cfg, input_shape: List[basic.ShapeSpec]):
super().__init__()
in_channels = input_shape[0].channels
num_classes = cfg.num_classes
num_convs = 4
prior_prob = cfg.cls_prior_prob
num_anchors = [9, 9, 9, 9, 9]
assert (
len(set(num_anchors)) == 1
), "not support different number of anchors between levels"
num_anchors = num_anchors[0]
cls_subnet = []
bbox_subnet = []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = | M.Sequential(*bbox_subnet) | megengine.module.Sequential |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
from typing import List
import megengine.module as M
from megengine.core import Tensor
from official.vision.detection.layers import basic
class RetinaNetHead(M.Module):
"""
The head used in RetinaNet for object classification and box regression.
"""
def __init__(self, cfg, input_shape: List[basic.ShapeSpec]):
super().__init__()
in_channels = input_shape[0].channels
num_classes = cfg.num_classes
num_convs = 4
prior_prob = cfg.cls_prior_prob
num_anchors = [9, 9, 9, 9, 9]
assert (
len(set(num_anchors)) == 1
), "not support different number of anchors between levels"
num_anchors = num_anchors[0]
cls_subnet = []
bbox_subnet = []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = M.Sequential(*bbox_subnet)
self.cls_score = M.Conv2d(
in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1,
)
self.bbox_pred = M.Conv2d(
in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1
)
# Initialization
for modules in [
self.cls_subnet,
self.bbox_subnet,
self.cls_score,
self.bbox_pred,
]:
for layer in modules.modules():
if isinstance(layer, M.Conv2d):
M.init.normal_(layer.weight, mean=0, std=0.01)
M.init.fill_(layer.bias, 0)
# Use prior in model initialization to improve stability
bias_value = -math.log((1 - prior_prob) / prior_prob)
| M.init.fill_(self.cls_score.bias, bias_value) | megengine.module.init.fill_ |
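# --- editor's sketch (illustrative, not part of the original sample) --------
# bias = -log((1 - p) / p) makes the sigmoid classifier output the prior
# probability p for every anchor at initialization, which keeps the early
# loss from being dominated by the huge number of negatives.
# Quick numeric check (p = 0.01 is the usual choice for cls_prior_prob):
import math
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
assert abs(1.0 / (1.0 + math.exp(-bias_value)) - prior_prob) < 1e-12
# -----------------------------------------------------------------------------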
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
from typing import List
import megengine.module as M
from megengine.core import Tensor
from official.vision.detection.layers import basic
class RetinaNetHead(M.Module):
"""
The head used in RetinaNet for object classification and box regression.
"""
def __init__(self, cfg, input_shape: List[basic.ShapeSpec]):
super().__init__()
in_channels = input_shape[0].channels
num_classes = cfg.num_classes
num_convs = 4
prior_prob = cfg.cls_prior_prob
num_anchors = [9, 9, 9, 9, 9]
assert (
len(set(num_anchors)) == 1
), "not support different number of anchors between levels"
num_anchors = num_anchors[0]
cls_subnet = []
bbox_subnet = []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
cls_subnet.append( | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
from typing import List
import megengine.module as M
from megengine.core import Tensor
from official.vision.detection.layers import basic
class RetinaNetHead(M.Module):
"""
The head used in RetinaNet for object classification and box regression.
"""
def __init__(self, cfg, input_shape: List[basic.ShapeSpec]):
super().__init__()
in_channels = input_shape[0].channels
num_classes = cfg.num_classes
num_convs = 4
prior_prob = cfg.cls_prior_prob
num_anchors = [9, 9, 9, 9, 9]
assert (
len(set(num_anchors)) == 1
), "not support different number of anchors between levels"
num_anchors = num_anchors[0]
cls_subnet = []
bbox_subnet = []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
bbox_subnet.append( | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
from typing import List
import megengine.module as M
from megengine.core import Tensor
from official.vision.detection.layers import basic
class RetinaNetHead(M.Module):
"""
The head used in RetinaNet for object classification and box regression.
"""
def __init__(self, cfg, input_shape: List[basic.ShapeSpec]):
super().__init__()
in_channels = input_shape[0].channels
num_classes = cfg.num_classes
num_convs = 4
prior_prob = cfg.cls_prior_prob
num_anchors = [9, 9, 9, 9, 9]
assert (
len(set(num_anchors)) == 1
), "not support different number of anchors between levels"
num_anchors = num_anchors[0]
cls_subnet = []
bbox_subnet = []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = M.Sequential(*bbox_subnet)
self.cls_score = M.Conv2d(
in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1,
)
self.bbox_pred = M.Conv2d(
in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1
)
# Initialization
for modules in [
self.cls_subnet,
self.bbox_subnet,
self.cls_score,
self.bbox_pred,
]:
for layer in modules.modules():
if isinstance(layer, M.Conv2d):
| M.init.normal_(layer.weight, mean=0, std=0.01) | megengine.module.init.normal_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
from typing import List
import megengine.module as M
from megengine.core import Tensor
from official.vision.detection.layers import basic
class RetinaNetHead(M.Module):
"""
The head used in RetinaNet for object classification and box regression.
"""
def __init__(self, cfg, input_shape: List[basic.ShapeSpec]):
super().__init__()
in_channels = input_shape[0].channels
num_classes = cfg.num_classes
num_convs = 4
prior_prob = cfg.cls_prior_prob
num_anchors = [9, 9, 9, 9, 9]
assert (
len(set(num_anchors)) == 1
), "not support different number of anchors between levels"
num_anchors = num_anchors[0]
cls_subnet = []
bbox_subnet = []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1,)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = M.Sequential(*bbox_subnet)
self.cls_score = M.Conv2d(
in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1,
)
self.bbox_pred = M.Conv2d(
in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1
)
# Initialization
for modules in [
self.cls_subnet,
self.bbox_subnet,
self.cls_score,
self.bbox_pred,
]:
for layer in modules.modules():
if isinstance(layer, M.Conv2d):
M.init.normal_(layer.weight, mean=0, std=0.01)
| M.init.fill_(layer.bias, 0) | megengine.module.init.fill_ |
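# --- editor's sketch (illustrative, not part of the original sample) --------
# Channel bookkeeping for the head above: every 3x3 conv keeps the spatial
# size (stride 1, padding 1), and the final convs emit one logit per
# (anchor, class) pair and four deltas per anchor at each location.
num_anchors, num_classes = 9, 80          # assumed COCO-style values
cls_channels = num_anchors * num_classes  # 720 logits per spatial position
box_channels = num_anchors * 4            # 36 box deltas per spatial position
assert (cls_channels, box_channels) == (720, 36)
# -----------------------------------------------------------------------------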
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = | dtype.qint8(scale) | megengine.core.tensor.dtype.qint8 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = | F.round(x) | megengine.functional.round |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = | F.clip(x, -128, 127) | megengine.functional.clip |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = | dtype.qint8(output_scale) | megengine.core.tensor.dtype.qint8 |
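# --- editor's sketch (illustrative, not part of the original samples) -------
# fake_quant above is symmetric int8 fake quantization: x/scale is rounded,
# clipped to [-128, 127], and scaled back, yielding the nearest representable
# value. A plain-numpy equivalent with a value that hits the clip:
import numpy as np
def fake_quant_np(x, scale):
    return np.clip(np.round(x / scale), -128, 127) * scale
out = fake_quant_np(np.array([0.24, -0.26, 100.0]), 0.1)
assert np.allclose(out, [0.2, -0.3, 12.7])  # 100.0 saturates at 127 * 0.1
# -----------------------------------------------------------------------------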
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = | dtype.qint8(inp_scale) | megengine.core.tensor.dtype.qint8 |
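# --- editor's note (illustrative, not part of the original samples) ---------
# test_elemwise compares in real units: the quantized kernel returns int8
# codes, so the test dequantizes them (.numpy() * output_scale) before
# checking against the fake-quantized float reference,
#   dequant(q) = q * scale,  where  q = clip(round(x / scale), -128, 127).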
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = | dtype.qint8(w_scale) | megengine.core.tensor.dtype.qint8 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = | dtype.qint32(inp_scale * w_scale) | megengine.core.tensor.dtype.qint32 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = | dtype.qint8(outp_scale) | megengine.core.tensor.dtype.qint8 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = | dtype.quint4(inp_scale, 0) | megengine.core.tensor.dtype.quint4 |
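# --- editor's sketch (illustrative, not part of the original samples) -------
# convert_to_nchw4 above rearranges NCHW into NCHW4: channels are split into
# groups of four and the group axis is moved innermost, which is the layout
# the CUDA int8 kernels consume. Shape-only numpy check:
import numpy as np
x = np.zeros((2, 8, 5, 5))                         # N, C, H, W with C % 4 == 0
y = x.reshape(2, 8 // 4, 4, 5, 5).transpose(0, 1, 3, 4, 2)
assert y.shape == (2, 2, 5, 5, 4)                  # N, C//4, H, W, 4
# -----------------------------------------------------------------------------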
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = | dtype.qint4(w_scale) | megengine.core.tensor.dtype.qint4 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = | dtype.qint32(inp_scale * w_scale) | megengine.core.tensor.dtype.qint32 |
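# --- editor's sketch (illustrative, not part of the original samples) -------
# The bias is kept in qint32 at scale inp_scale * w_scale because the conv
# accumulates (x_q * inp_scale) * (w_q * w_scale): the integer accumulator is
# implicitly scaled by that product, so the bias must share the scale to be
# added in the integer domain. Toy 1x1 check with made-up integer codes:
x_q, w_q, b_q = 10, 3, 7
inp_scale, w_scale = 0.5, 0.25
acc = x_q * w_q + b_q                       # pure integer accumulation
real = acc * (inp_scale * w_scale)          # single dequantization at the end
assert real == (x_q * inp_scale) * (w_q * w_scale) + b_q * (inp_scale * w_scale)
# -----------------------------------------------------------------------------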
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = | dtype.quint4(outp_scale, 0) | megengine.core.tensor.dtype.quint4 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update( | create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale) | megengine.quantization.create_qparams |
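# A minimal, self-contained sketch of the fake-quant + qparams pattern that
# test_elemwise exercises, recombining only calls from the prompt above;
# "x" and "scale" are placeholder names, not part of the dataset.
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.quantization import QuantMode, create_qparams
x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
scale = np.float32(1.25)
x = F.round(x / scale) * scale  # fake-quantize in float (clipping omitted)
x.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", scale))
x_int8 = x.astype(dtype.qint8(scale))  # materialize the real qint8 payload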
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update( | create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale) | megengine.quantization.create_qparams |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = | dtype.get_scale(inp_dtype) | megengine.core.tensor.dtype.get_scale |
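# get_scale simply reads the scale back out of a quantized dtype object, so
# the lookups above recover exactly the scales the dtypes were built with.
# A minimal sketch with arbitrary values:
from megengine.core.tensor import dtype
q8 = dtype.qint8(0.5)
q32 = dtype.qint32(0.5 * 2.0)  # bias scale = input scale * weight scale
print(dtype.get_scale(q8), dtype.get_scale(q32))  # 0.5 1.0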
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = | dtype.get_scale(w_dtype) | megengine.core.tensor.dtype.get_scale |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = | dtype.get_scale(b_dtype) | megengine.core.tensor.dtype.get_scale |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = | dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype) | megengine.core.tensor.dtype.convert_to_qint8 |
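# A minimal sketch of the quantize/dequantize round trip behind
# convert_to_qint8: store round(arr / scale) as an int8 payload, recover
# floats with astype. "arr" and "q" are placeholder names. (The test above
# pre-multiplies by the scale so the stored integers equal the raw samples.)
import numpy as np
import megengine as mge
from megengine.core.tensor import dtype
q = dtype.qint8(2.0)
arr = np.random.normal(size=(2, 3)).astype("float32")
qarr = dtype.convert_to_qint8(arr, q)  # int8 payload, round(arr / 2.0)
back = mge.tensor(qarr, dtype=q).astype("float32")  # ~arr, up to rounding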
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = | dtype.convert_to_qint8(w_v * w_scale, w_dtype) | megengine.core.tensor.dtype.convert_to_qint8 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = | dtype.convert_to_qint32(b_v * b_scale, b_dtype) | megengine.core.tensor.dtype.convert_to_qint32 |
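# Why the bias uses qint32 with scale inp_scale * w_scale: an int8 input
# times an int8 weight accumulates in int32 at exactly that product scale,
# so the bias can be added without rescaling. A sketch with hypothetical
# scales and shapes:
import numpy as np
from megengine.core.tensor import dtype
b_dt = dtype.qint32(1.5 * 2.5)
b = np.random.normal(size=(1, 8, 1, 1))
b_q = dtype.convert_to_qint32(b * dtype.get_scale(b_dt), b_dt)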
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = | mge.tensor(inpv, dtype=inp_dtype) | megengine.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = | mge.Parameter(wv, dtype=w_dtype) | megengine.Parameter |
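# mge.Parameter is the trainable counterpart of mge.tensor: the test wraps
# weights and bias in Parameter and the input in tensor, and quantized
# dtypes attach the same way to both. A sketch with placeholder shapes:
import numpy as np
import megengine as mge
from megengine.core.tensor import dtype
w_dt = dtype.qint8(0.5)
w_np = dtype.convert_to_qint8(np.random.normal(size=(8, 4, 3, 3)), w_dt)
w_param = mge.Parameter(w_np, dtype=w_dt)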
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = | mge.Parameter(bv, dtype=b_dtype) | megengine.Parameter |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = | F.flatten(expected) | megengine.functional.flatten |
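# F.flatten collapses a tensor to 1-D (axis 0 through the last axis by
# default), which is what makes the NCHW and NCHW4 outputs comparable
# elementwise above. A short sketch:
import numpy as np
import megengine as mge
import megengine.functional as F
t = mge.tensor(np.arange(24, dtype="float32").reshape(2, 3, 4))
flat = F.flatten(t)  # shape (24,)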
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = | F.flatten(result) | megengine.functional.flatten |
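# The convert_to_nchw4 helper above packs channels in groups of four and
# moves the group to the innermost axis, turning NCHW into N,(C//4),H,W,4
# as CUDA's NCHW4 layout expects. A shape-only sketch:
import numpy as np
import megengine as mge
import megengine.functional as F
x = mge.tensor(np.zeros((2, 8, 5, 5), dtype="float32"))
y = F.reshape(x, (2, 8 // 4, 4, 5, 5))
y = F.transpose(y, (0, 1, 3, 4, 2))  # y.shape == (2, 2, 5, 5, 4)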
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
| get_device_count("gpu") | megengine.device.get_device_count |
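# get_device_count("gpu") reports how many CUDA devices are visible, so the
# skipif above turns this NCHW int8 test off whenever a GPU is present (the
# CUDA path exercises NCHW4 instead). The same guard outside pytest:
import megengine as mge
from megengine.device import get_device_count
if get_device_count("gpu") > 0 and mge.is_cuda_available():
    print("CUDA present: quantized conv tests use the NCHW4 layout")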
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = | dtype.get_scale(inp_dtype) | megengine.core.tensor.dtype.get_scale |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = | dtype.get_scale(w_dtype) | megengine.core.tensor.dtype.get_scale |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = | dtype.get_scale(b_dtype) | megengine.core.tensor.dtype.get_scale |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
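        # NCHW4 packs channels in groups of four as the innermost dim,
        # (N, C, H, W) -> (N, C//4, H, W, 4); this is the layout used by
        # the CUDA int8 path selected below.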
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
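        # The quantized kernel may differ from the float reference by one
        # rounding step, so tolerate one output quantization step (outp_scale).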
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = | dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype) | megengine.core.tensor.dtype.convert_to_quint4 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
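# fake_quant emulates qint8 rounding in float32: scale down, round,
# clamp to the int8 range [-128, 127], then scale back up.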
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
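        # NCHW4 packs channels in groups of four as the innermost dim,
        # (N, C, H, W) -> (N, C//4, H, W, 4); this is the layout used by
        # the CUDA int8 path selected below.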
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
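        # The quantized kernel may differ from the float reference by one
        # rounding step, so tolerate one output quantization step (outp_scale).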
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = | dtype.convert_to_qint4(w_v * w_scale, w_dtype) | megengine.core.tensor.dtype.convert_to_qint4 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
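# fake_quant emulates qint8 rounding in float32: scale down, round,
# clamp to the int8 range [-128, 127], then scale back up.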
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
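        # NCHW4 packs channels in groups of four as the innermost dim,
        # (N, C, H, W) -> (N, C//4, H, W, 4); this is the layout used by
        # the CUDA int8 path selected below.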
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
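        # The quantized kernel may differ from the float reference by one
        # rounding step, so tolerate one output quantization step (outp_scale).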
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = | dtype.convert_to_qint32(b_v * b_scale, b_dtype) | megengine.core.tensor.dtype.convert_to_qint32 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
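# fake_quant emulates qint8 rounding in float32: scale down, round,
# clamp to the int8 range [-128, 127], then scale back up.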
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
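        # NCHW4 packs channels in groups of four as the innermost dim,
        # (N, C, H, W) -> (N, C//4, H, W, 4); this is the layout used by
        # the CUDA int8 path selected below.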
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
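        # The quantized kernel may differ from the float reference by one
        # rounding step, so tolerate one output quantization step (outp_scale).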
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = | mge.Tensor(inpv, dtype=inp_dtype) | megengine.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
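# fake_quant emulates qint8 rounding in float32: scale down, round,
# clamp to the int8 range [-128, 127], then scale back up.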
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
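        # NCHW4 packs channels in groups of four as the innermost dim,
        # (N, C, H, W) -> (N, C//4, H, W, 4); this is the layout used by
        # the CUDA int8 path selected below.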
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
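        # The quantized kernel may differ from the float reference by one
        # rounding step, so tolerate one output quantization step (outp_scale).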
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = | mge.Parameter(wv, dtype=w_dtype) | megengine.Parameter |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
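# fake_quant emulates qint8 rounding in float32: scale down, round,
# clamp to the int8 range [-128, 127], then scale back up.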
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
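        # NCHW4 packs channels in groups of four as the innermost dim,
        # (N, C, H, W) -> (N, C//4, H, W, 4); this is the layout used by
        # the CUDA int8 path selected below.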
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
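        # The quantized kernel may differ from the float reference by one
        # rounding step, so tolerate one output quantization step (outp_scale).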
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = | mge.Parameter(bv, dtype=b_dtype) | megengine.Parameter |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
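# fake_quant emulates qint8 rounding in float32: scale down, round,
# clamp to the int8 range [-128, 127], then scale back up.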
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
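        # NCHW4 packs channels in groups of four as the innermost dim,
        # (N, C, H, W) -> (N, C//4, H, W, 4); this is the layout used by
        # the CUDA int8 path selected below.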
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
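        # The quantized kernel may differ from the float reference by one
        # rounding step, so tolerate one output quantization step (outp_scale).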
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = | F.flatten(expected) | megengine.functional.flatten |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
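# fake_quant emulates qint8 rounding in float32: scale down, round,
# clamp to the int8 range [-128, 127], then scale back up.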
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
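        # NCHW4 packs channels in groups of four as the innermost dim,
        # (N, C, H, W) -> (N, C//4, H, W, 4); this is the layout used by
        # the CUDA int8 path selected below.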
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
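        # The quantized kernel may differ from the float reference by one
        # rounding step, so tolerate one output quantization step (outp_scale).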
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = | F.flatten(result) | megengine.functional.flatten |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
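# fake_quant emulates qint8 rounding in float32: scale down, round,
# clamp to the int8 range [-128, 127], then scale back up.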
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
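        # NCHW4 packs channels in groups of four as the innermost dim,
        # (N, C, H, W) -> (N, C//4, H, W, 4); this is the layout used by
        # the CUDA int8 path selected below.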
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
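        # The quantized kernel may differ from the float reference by one
        # rounding step, so tolerate one output quantization step (outp_scale).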
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = | dtype.qint8(inp_scale) | megengine.core.tensor.dtype.qint8 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
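# fake_quant emulates qint8 rounding in float32: scale down, round,
# clamp to the int8 range [-128, 127], then scale back up.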
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
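        # NCHW4 packs channels in groups of four as the innermost dim,
        # (N, C, H, W) -> (N, C//4, H, W, 4); this is the layout used by
        # the CUDA int8 path selected below.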
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
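# The CUDA int8 kernels expect the NCHW4 layout; the CPU path uses plain NCHW.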
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
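# Cover 1x1 and 2x2 kernels across several shapes, with and without bias and
# with the fused relu activation.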
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
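# Same comparison at 4-bit precision: quint4 activations (zero point 0) and
# qint4 weights; kept skipped since int4 needs a CUDA version of at least 10.2.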
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
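# Quantized transposed convolution: draw scales and data from a seeded RNG,
# quantize input/weight to qint8 and bias to qint32, and exercise the
# quantized ConvTranspose2d module imported above.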
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = | dtype.qint8(weight_scale) | megengine.core.tensor.dtype.qint8 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = | dtype.qint32(bias_scale) | megengine.core.tensor.dtype.qint32 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = | dtype.qint8(out_scale) | megengine.core.tensor.dtype.qint8 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = | dtype.convert_to_qint8(inp_fp32, inp_dtype) | megengine.core.tensor.dtype.convert_to_qint8 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = dtype.convert_to_qint8(inp_fp32, inp_dtype)
weight_int8 = | dtype.convert_to_qint8(weight_fp32, weight_dtype) | megengine.core.tensor.dtype.convert_to_qint8 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = dtype.convert_to_qint8(inp_fp32, inp_dtype)
weight_int8 = dtype.convert_to_qint8(weight_fp32, weight_dtype)
bias_int32 = | dtype.convert_to_qint32(bias_fp32, bias_dtype) | megengine.core.tensor.dtype.convert_to_qint32 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = dtype.convert_to_qint8(inp_fp32, inp_dtype)
weight_int8 = dtype.convert_to_qint8(weight_fp32, weight_dtype)
bias_int32 = dtype.convert_to_qint32(bias_fp32, bias_dtype)
inp_int8 = | mge.tensor(inp_int8, dtype=inp_dtype) | megengine.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = dtype.convert_to_qint8(inp_fp32, inp_dtype)
weight_int8 = dtype.convert_to_qint8(weight_fp32, weight_dtype)
bias_int32 = dtype.convert_to_qint32(bias_fp32, bias_dtype)
inp_int8 = mge.tensor(inp_int8, dtype=inp_dtype)
weight_int8 = | mge.Parameter(weight_int8, dtype=weight_dtype) | megengine.Parameter |
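# --- Editorial sketch (not part of the dataset rows) --------------------------
# The `fake_quant` helper that recurs in these prompts simulates symmetric
# qint8 quantization in float arithmetic: divide by scale, round, clamp to
# [-128, 127], rescale. A minimal numpy-only analogue (the name
# `int8_round_trip` is ours, for illustration only):
import numpy as np

def int8_round_trip(x, scale):
    q = np.clip(np.round(x / scale), -128, 127)  # snap onto the int8 grid
    return q * scale                             # map back to real values
# -------------------------------------------------------------------------------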
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = dtype.convert_to_qint8(inp_fp32, inp_dtype)
weight_int8 = dtype.convert_to_qint8(weight_fp32, weight_dtype)
bias_int32 = dtype.convert_to_qint32(bias_fp32, bias_dtype)
inp_int8 = mge.tensor(inp_int8, dtype=inp_dtype)
weight_int8 = mge.Parameter(weight_int8, dtype=weight_dtype)
bias_int32 = | mge.Parameter(bias_int32, dtype=bias_dtype) | megengine.Parameter |
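# --- Editorial note (not part of the dataset rows) ----------------------------
# The bias dtype in these rows is qint32 with scale inp_scale * weight_scale:
# an int8 convolution accumulates products x_q * w_q in int32, and each such
# product stands for (x_q * inp_scale) * (w_q * weight_scale), so the
# accumulator's implicit scale is the product of the two input scales and the
# bias must share that scale to be added in the integer domain.
# Tiny numpy check of the identity (all names ours):
import numpy as np

inp_s, w_s = 0.05, 0.04
x_q, w_q = np.int32(17), np.int32(-9)
assert np.isclose((x_q * inp_s) * (w_q * w_s), (x_q * w_q) * (inp_s * w_s))
# -------------------------------------------------------------------------------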
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = dtype.convert_to_qint8(inp_fp32, inp_dtype)
weight_int8 = dtype.convert_to_qint8(weight_fp32, weight_dtype)
bias_int32 = dtype.convert_to_qint32(bias_fp32, bias_dtype)
inp_int8 = mge.tensor(inp_int8, dtype=inp_dtype)
weight_int8 = mge.Parameter(weight_int8, dtype=weight_dtype)
bias_int32 = mge.Parameter(bias_int32, dtype=bias_dtype)
inp_fp32 = inp_int8.astype("float32")
weight_fp32 = weight_int8.astype("float32")
bias_fp32 = bias_int32.astype("float32")
expected = F.conv_transpose2d(
inp_fp32,
weight_fp32,
bias_fp32 if has_bias else None,
stride=(SH, SW),
padding=(PH, PW),
dilation=(DH, DW),
groups=groups,
conv_mode=conv_mode,
compute_mode=compute_mode,
)
expected = dtype.convert_to_qint8(expected.numpy(), out_dtype)
expected = | dtype.convert_from_qint8(expected) | megengine.core.tensor.dtype.convert_from_qint8 |
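# --- Editorial note (not part of the dataset rows) ----------------------------
# `convert_to_qint8` followed by `convert_from_qint8` above snaps the float
# reference onto the qint8 output grid and back, so `expected` carries the same
# rounding error as the quantized kernel's dequantized output. Numpy-only
# analogue of that grid snap (assumes symmetric qint8, zero point 0; name ours):
import numpy as np

def snap_to_qint8_grid(x, scale):
    return np.clip(np.round(x / scale), -128, 127) * scale
# -------------------------------------------------------------------------------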
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = dtype.convert_to_qint8(inp_fp32, inp_dtype)
weight_int8 = dtype.convert_to_qint8(weight_fp32, weight_dtype)
bias_int32 = dtype.convert_to_qint32(bias_fp32, bias_dtype)
inp_int8 = mge.tensor(inp_int8, dtype=inp_dtype)
weight_int8 = mge.Parameter(weight_int8, dtype=weight_dtype)
bias_int32 = mge.Parameter(bias_int32, dtype=bias_dtype)
inp_fp32 = inp_int8.astype("float32")
weight_fp32 = weight_int8.astype("float32")
bias_fp32 = bias_int32.astype("float32")
expected = F.conv_transpose2d(
inp_fp32,
weight_fp32,
bias_fp32 if has_bias else None,
stride=(SH, SW),
padding=(PH, PW),
dilation=(DH, DW),
groups=groups,
conv_mode=conv_mode,
compute_mode=compute_mode,
)
expected = dtype.convert_to_qint8(expected.numpy(), out_dtype)
expected = dtype.convert_from_qint8(expected)
conv_transpose2d = ConvTranspose2d(
in_channels=IC,
out_channels=OC,
kernel_size=(KH, KW),
stride=(SH, SW),
padding=(PH, PW),
dilation=(DH, DW),
groups=groups,
bias=has_bias,
conv_mode=conv_mode,
compute_mode=compute_mode,
dtype=out_dtype,
)
conv_transpose2d.weight = | mge.Parameter(weight_int8) | megengine.Parameter |
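# --- Editorial note (not part of the dataset rows) ----------------------------
# The row above drives the quantized `ConvTranspose2d` module directly:
# pre-quantized qint8 weights (and, in the next row, the qint32 bias) are
# assigned as Parameters, the module consumes the qint8 input, and its qint8
# output is dequantized before being compared against the float reference that
# was snapped onto the same output grid earlier in the test.
# -------------------------------------------------------------------------------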
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = dtype.convert_to_qint8(inp_fp32, inp_dtype)
weight_int8 = dtype.convert_to_qint8(weight_fp32, weight_dtype)
bias_int32 = dtype.convert_to_qint32(bias_fp32, bias_dtype)
inp_int8 = mge.tensor(inp_int8, dtype=inp_dtype)
weight_int8 = mge.Parameter(weight_int8, dtype=weight_dtype)
bias_int32 = mge.Parameter(bias_int32, dtype=bias_dtype)
inp_fp32 = inp_int8.astype("float32")
weight_fp32 = weight_int8.astype("float32")
bias_fp32 = bias_int32.astype("float32")
expected = F.conv_transpose2d(
inp_fp32,
weight_fp32,
bias_fp32 if has_bias else None,
stride=(SH, SW),
padding=(PH, PW),
dilation=(DH, DW),
groups=groups,
conv_mode=conv_mode,
compute_mode=compute_mode,
)
expected = dtype.convert_to_qint8(expected.numpy(), out_dtype)
expected = dtype.convert_from_qint8(expected)
conv_transpose2d = ConvTranspose2d(
in_channels=IC,
out_channels=OC,
kernel_size=(KH, KW),
stride=(SH, SW),
padding=(PH, PW),
dilation=(DH, DW),
groups=groups,
bias=has_bias,
conv_mode=conv_mode,
compute_mode=compute_mode,
dtype=out_dtype,
)
conv_transpose2d.weight = mge.Parameter(weight_int8)
if has_bias:
conv_transpose2d.bias = mge.Parameter(bias_int32)
result = conv_transpose2d.forward(inp_int8).numpy()
result = | dtype.convert_from_qint8(result) | megengine.core.tensor.dtype.convert_from_qint8 |
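# --- Editorial note (not part of the dataset rows) ----------------------------
# After both tensors are dequantized, the conv tests in this file compare them
# with atol equal to the output scale; that tolerance admits a disagreement of
# at most one step of the output quantization grid, which is the worst case
# when a true value lands on a rounding boundary.
# -------------------------------------------------------------------------------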
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant( | _elwise(x1, mode=kind) | megengine.functional.elemwise._elwise |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant( | _elwise(x1, x2, mode=kind) | megengine.functional.elemwise._elwise |
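# --- Editorial note (not part of the dataset rows) ----------------------------
# In `test_elemwise`, the quantized mode name is simply "q" + kind ("abs" ->
# "qabs", "fuse_add_tanh" -> "qfuse_add_tanh"); `_elemwise_multi_type` takes
# int8 operands and emits the requested qint8 dtype, and multiplying its raw
# integer result by `output_scale` dequantizes it for comparison with the
# fake-quantized float reference. Note that `QuantMode.SYMMERTIC` is the
# spelling used by the MegEngine API itself, not a transcription error here.
# -------------------------------------------------------------------------------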
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = | F.transpose(var, (0, 1, 3, 4, 2)) | megengine.functional.transpose |
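# --- Editorial sketch (not part of the dataset rows) --------------------------
# `convert_to_nchw4` packs channels in groups of four into a trailing axis:
# (N, C, H, W) -> reshape to (N, C//4, 4, H, W) -> transpose to
# (N, C//4, H, W, 4). Numpy analogue (name ours):
import numpy as np

def to_nchw4(x):
    n, c, h, w = x.shape
    assert c % 4 == 0, "channel count must be divisible by 4"
    return x.reshape(n, c // 4, 4, h, w).transpose(0, 1, 3, 4, 2)
# -------------------------------------------------------------------------------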
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if | mge.is_cuda_available() | megengine.is_cuda_available |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = | F.transpose(result, (0, 1, 4, 2, 3)) | megengine.functional.transpose |
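# --- Editorial sketch (not part of the dataset rows) --------------------------
# Transposing the NCHW4 result with (0, 1, 4, 2, 3) moves the packed 4-channel
# axis back beside C//4, so flattening enumerates elements in the same order as
# the NCHW reference. Numpy round-trip check (all names ours):
import numpy as np

x = np.arange(1 * 8 * 2 * 2).reshape(1, 8, 2, 2)
packed = x.reshape(1, 2, 4, 2, 2).transpose(0, 1, 3, 4, 2)      # NCHW -> NCHW4
restored = packed.transpose(0, 1, 4, 2, 3).reshape(1, 8, 2, 2)  # and back
assert (restored == x).all()
# -------------------------------------------------------------------------------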
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
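        # atol=outp_scale below (illustrative reading): the quantized and float
        # paths may differ by up to one quantization step of the output scale.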
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and get_cuda_compute_capability(0) < 61,
reason="does not support int8 when gpu compute capability less than 6.1",
)
def test_conv_transpose2d():
rng = np.random.RandomState(seed=2021)
def test_func(
N,
IC,
IH,
IW,
OC,
KH,
KW,
SH,
SW,
PH,
PW,
DH,
DW,
groups=1,
has_bias=True,
conv_mode: str = "cross_correlation",
compute_mode: str = "default",
):
inp_scale = np.float32(rng.uniform(low=0.04, high=0.06))
weight_scale = np.float32(rng.uniform(low=0.04, high=0.06))
bias_scale = inp_scale * weight_scale
out_scale = np.float32(rng.uniform(low=0.04, high=0.06))
inp_dtype = dtype.qint8(inp_scale)
weight_dtype = dtype.qint8(weight_scale)
bias_dtype = dtype.qint32(bias_scale)
out_dtype = dtype.qint8(out_scale)
inp_fp32 = rng.uniform(low=-1, high=1, size=(N, IC, IH, IW)).astype(np.float32)
weight_fp32 = rng.uniform(low=-1, high=1, size=(IC, OC, KH, KW)).astype(
np.float32
)
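        # Note (illustration): for groups=1, transposed-conv weights use the
        # (IC, OC, KH, KW) layout, unlike the (OC, IC, KH, KW) layout of a
        # forward conv2d.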
bias_fp32 = rng.uniform(low=-1, high=1, size=(1, OC, 1, 1)).astype(np.float32)
inp_int8 = dtype.convert_to_qint8(inp_fp32, inp_dtype)
weight_int8 = dtype.convert_to_qint8(weight_fp32, weight_dtype)
bias_int32 = dtype.convert_to_qint32(bias_fp32, bias_dtype)
inp_int8 = mge.tensor(inp_int8, dtype=inp_dtype)
weight_int8 = mge.Parameter(weight_int8, dtype=weight_dtype)
bias_int32 = mge.Parameter(bias_int32, dtype=bias_dtype)
inp_fp32 = inp_int8.astype("float32")
weight_fp32 = weight_int8.astype("float32")
bias_fp32 = bias_int32.astype("float32")
expected = F.conv_transpose2d(
inp_fp32,
weight_fp32,
bias_fp32 if has_bias else None,
stride=(SH, SW),
padding=(PH, PW),
dilation=(DH, DW),
groups=groups,
conv_mode=conv_mode,
compute_mode=compute_mode,
)
expected = dtype.convert_to_qint8(expected.numpy(), out_dtype)
expected = dtype.convert_from_qint8(expected)
conv_transpose2d = ConvTranspose2d(
in_channels=IC,
out_channels=OC,
kernel_size=(KH, KW),
stride=(SH, SW),
padding=(PH, PW),
dilation=(DH, DW),
groups=groups,
bias=has_bias,
conv_mode=conv_mode,
compute_mode=compute_mode,
dtype=out_dtype,
)
conv_transpose2d.weight = mge.Parameter(weight_int8)
if has_bias:
conv_transpose2d.bias = | mge.Parameter(bias_int32) | megengine.Parameter |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
| get_device_count("gpu") | megengine.device.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_uint4, w_int4, b_int32).astype("float32")
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(
get_device_count("gpu") > 0 and | get_cuda_compute_capability(0) | megengine.device.get_cuda_compute_capability |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return | F.relu(O) | megengine.functional.relu |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core.tensor import dtype
from megengine.device import get_cuda_compute_capability, get_device_count
from megengine.functional.elemwise import _elemwise_multi_type, _elwise
from megengine.module.quantized.conv import ConvTranspose2d
from megengine.quantization import QuantMode, create_qparams
def quant(x, scale):
x_dtype = dtype.qint8(scale)
return x.astype(x_dtype)
def fake_quant(x, scale):
x = x / scale
x = F.round(x)
x = F.clip(x, -128, 127)
x = x * scale
return x
@pytest.mark.parametrize("kind", ["abs", "sin", "sub", "mul", "fuse_add_tanh"])
def test_elemwise(kind):
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1_scale = np.float32(np.random.rand() + 1)
x1 = fake_quant(x1, x1_scale)
x1.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x1_scale))
x1_int8 = quant(x1, x1_scale)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2_scale = np.float32(np.random.rand() + 1)
x2 = fake_quant(x2, x2_scale)
x2.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", x2_scale))
x2_int8 = quant(x2, x2_scale)
output_scale = np.float32(np.random.rand() + 1)
output_dtype = dtype.qint8(output_scale)
quantized_kind = "q" + kind
if kind in ("abs", "sin"):
desired_out = fake_quant(_elwise(x1, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
else:
desired_out = fake_quant(_elwise(x1, x2, mode=kind), output_scale)
actual_out = (
_elemwise_multi_type(
x1_int8, x2_int8, mode=quantized_kind, dtype=output_dtype
).numpy()
* output_scale
)
np.testing.assert_allclose(actual_out, desired_out.numpy())
@pytest.mark.skipif(
get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
inp_scale = np.float32(np.random.rand() + 1)
w_scale = np.float32(np.random.rand() + 1)
outp_scale = np.float32(np.random.rand() + 1)
inp_dtype = dtype.qint8(inp_scale)
w_dtype = dtype.qint8(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = mge.tensor(inpv, dtype=inp_dtype)
w_int8 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
def convert_to_nchw4(var):
var = F.reshape(
var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
)
var = F.transpose(var, (0, 1, 3, 4, 2))
return var
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return F.relu(O)
else:
return O
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else mge.Parameter(np.zeros_like(b.numpy()))
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = convert_to_nchw4(b)
return F.quantized.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if mge.is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = F.transpose(result, (0, 1, 4, 2, 3))
expected = F.flatten(expected)
result = F.flatten(result)
np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skip(reason="does not support int4 when cuda version is lower than 10.2")
def test_conv_bias_int4():
inp_scale = 1.5
w_scale = 2.5
outp_scale = 1.5
inp_dtype = dtype.quint4(inp_scale, 0)
w_dtype = dtype.qint4(w_scale)
b_dtype = dtype.qint32(inp_scale * w_scale)
out_dtype = dtype.quint4(outp_scale, 0)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="identity",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = dtype.get_scale(inp_dtype)
w_scale = dtype.get_scale(w_dtype)
b_scale = dtype.get_scale(b_dtype)
inpv = dtype.convert_to_quint4(inp_v * inp_scale, inp_dtype)
wv = dtype.convert_to_qint4(w_v * w_scale, w_dtype)
bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_uint4 = mge.Tensor(inpv, dtype=inp_dtype)
w_int4 = mge.Parameter(wv, dtype=w_dtype)
b_int32 = mge.Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_uint4.astype("float32")
w_fp32 = w_int4.astype("float32")
b_fp32 = b_int32.astype("float32")
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "relu":
return | F.relu(O) | megengine.functional.relu |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools
from typing import Callable, Optional
import megengine._internal as mgb
from ..core import set_default_device
_master_ip = None
_master_port = 0
_world_size = 0
_rank = 0
_backend = None
def init_process_group(
master_ip: str,
master_port: int,
world_size: int,
rank: int,
dev: int,
backend: Optional[str] = "nccl",
) -> None:
"""Initialize the distributed process group, and also specify the device used in the current process.
:param master_ip: IP address of the master node.
:param master_port: Port available for all processes to communicate.
:param world_size: Total number of processes participating in the job.
:param rank: Rank of the current process.
:param dev: The GPU device id to bind this process to.
    :param backend: Communicator backend, currently supports 'nccl' and 'ucx'
"""
global _master_ip # pylint: disable=global-statement
global _master_port # pylint: disable=global-statement
global _world_size # pylint: disable=global-statement
global _rank # pylint: disable=global-statement
global _backend # pylint: disable=global-statement
if not isinstance(master_ip, str):
raise TypeError("Expect type str but got {}".format(type(master_ip)))
if not isinstance(master_port, int):
raise TypeError("Expect type int but got {}".format(type(master_port)))
if not isinstance(world_size, int):
raise TypeError("Expect type int but got {}".format(type(world_size)))
if not isinstance(rank, int):
raise TypeError("Expect type int but got {}".format(type(rank)))
if not isinstance(backend, str):
raise TypeError("Expect type str but got {}".format(type(backend)))
_master_ip = master_ip
_master_port = master_port
_world_size = world_size
_rank = rank
_backend = backend
set_default_device(mgb.comp_node("gpu" + str(dev)))
if rank == 0:
_master_port = mgb.config.create_mm_server("0.0.0.0", master_port)
if _master_port == -1:
raise Exception("Failed to start server on port {}".format(master_port))
else:
assert master_port > 0, "master_port must be specified for non-zero rank"
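# Minimal usage sketch (illustrative, not part of the original module); assumes
# a single-node job with two GPUs and that port 23456 is free on the master:
#
#   # in the rank-0 process (also starts the mm server):
#   init_process_group("localhost", 23456, world_size=2, rank=0, dev=0)
#   # in the rank-1 process:
#   init_process_group("localhost", 23456, world_size=2, rank=1, dev=1)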
def is_distributed() -> bool:
"""Return True if the distributed process group has been initialized"""
return _world_size is not None and _world_size > 1
def get_master_ip() -> str:
"""Get the IP address of the master node"""
return str(_master_ip)
def get_master_port() -> int:
"""Get the port of the rpc server on the master node"""
return _master_port
def get_world_size() -> int:
"""Get the total number of processes participating in the job"""
return _world_size
def get_rank() -> int:
"""Get the rank of the current process"""
return _rank
def get_backend() -> str:
"""Get the backend str"""
return str(_backend)
def group_barrier() -> None:
"""Block until all ranks in the group reach this barrier"""
| mgb.config.group_barrier(_master_ip, _master_port, _world_size, _rank) | megengine._internal.config.group_barrier |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools
from typing import Callable, Optional
import megengine._internal as mgb
from ..core import set_default_device
_master_ip = None
_master_port = 0
_world_size = 0
_rank = 0
_backend = None
def init_process_group(
master_ip: str,
master_port: int,
world_size: int,
rank: int,
dev: int,
backend: Optional[str] = "nccl",
) -> None:
"""Initialize the distributed process group, and also specify the device used in the current process.
:param master_ip: IP address of the master node.
:param master_port: Port available for all processes to communicate.
:param world_size: Total number of processes participating in the job.
:param rank: Rank of the current process.
:param dev: The GPU device id to bind this process to.
    :param backend: Communicator backend, currently supports 'nccl' and 'ucx'
"""
global _master_ip # pylint: disable=global-statement
global _master_port # pylint: disable=global-statement
global _world_size # pylint: disable=global-statement
global _rank # pylint: disable=global-statement
global _backend # pylint: disable=global-statement
if not isinstance(master_ip, str):
raise TypeError("Expect type str but got {}".format(type(master_ip)))
if not isinstance(master_port, int):
raise TypeError("Expect type int but got {}".format(type(master_port)))
if not isinstance(world_size, int):
raise TypeError("Expect type int but got {}".format(type(world_size)))
if not isinstance(rank, int):
raise TypeError("Expect type int but got {}".format(type(rank)))
if not isinstance(backend, str):
raise TypeError("Expect type str but got {}".format(type(backend)))
_master_ip = master_ip
_master_port = master_port
_world_size = world_size
_rank = rank
_backend = backend
set_default_device(mgb.comp_node("gpu" + str(dev)))
if rank == 0:
_master_port = | mgb.config.create_mm_server("0.0.0.0", master_port) | megengine._internal.config.create_mm_server |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
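# Math sketch for the subgraph above (illustration): per channel c it computes
#   mean_c = sum(x_c) / n,  var_c = sum(x_c^2) / n - mean_c^2
#   out = (x - mean_c) * weight_c * (var_c + eps) ** -0.5 + bias_c
# with the last line expressed as two fused multiply-adds ("fma3").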
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = | CompNode(device) | megengine.device.CompNode |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = trace(symbolic=symbolic)(subgraph_batch_norm)
primitive_batch_norm = trace(symbolic=symbolic)(primitive_batch_norm)
def rand_tensor(shape, dtype=dtype, device=device):
return megengine.tensor(np.random.random(shape), dtype=dtype, device=device)
    # skip this test because several reduce ops could not run sequentially with the opr cache
return
# test shape change
for image_shape in [(223, 223), (10, 20)]:
ndim = len(image_shape) + 2
input_shape = (batch_size, channels) + image_shape
param_shape = (1, channels) + (1,) * len(image_shape)
inp = rand_tensor(input_shape) * 1e3 + 1e3
weight = rand_tensor(param_shape)
bias = rand_tensor(param_shape)
eps = megengine.tensor(1e-5, dtype=dtype, device=device)
diff = rand_tensor(input_shape)
out1, grad1 = subgraph_batch_norm(inp, weight, bias, eps, diff)
out2, grad2 = primitive_batch_norm(inp, weight, bias, eps, diff)
_assert_allclose(out1.numpy(), out2.numpy())
_assert_allclose(grad1.numpy(), grad2.numpy())
@functools.lru_cache(maxsize=None)
def _get_mul_fn(dtype, device):
@subgraph_fn(
"Mul",
dtype=dtype,
device=device,
nr_inputs=2,
gopt_level=None,
jit_fusion=False,
custom_grad=True,
)
def mul(inputs, f, c):
x, y = inputs[0:2]
z = f("*", x, y)
(dz,) = yield (z,)
dx = f("*", dz, y)
dy = f("*", dz, x)
yield (dx, dy)
return mul
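# Reading the generator above (illustration): the first `yield (z,)` returns
# the forward outputs; the gradient sent back in arrives as `dz`, and the
# second yield produces the input gradients (dx, dy) required by custom_grad.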
def test_subgraph_jit_backward():
x_np = np.random.rand(3, 4, 5).astype("float32")
x1 = | megengine.Tensor(x_np) | megengine.Tensor |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = trace(symbolic=symbolic)(subgraph_batch_norm)
primitive_batch_norm = trace(symbolic=symbolic)(primitive_batch_norm)
def rand_tensor(shape, dtype=dtype, device=device):
return megengine.tensor(np.random.random(shape), dtype=dtype, device=device)
    # skip this test because several reduce ops could not run sequentially with the opr cache
return
# test shape change
for image_shape in [(223, 223), (10, 20)]:
ndim = len(image_shape) + 2
input_shape = (batch_size, channels) + image_shape
param_shape = (1, channels) + (1,) * len(image_shape)
inp = rand_tensor(input_shape) * 1e3 + 1e3
weight = rand_tensor(param_shape)
bias = rand_tensor(param_shape)
eps = megengine.tensor(1e-5, dtype=dtype, device=device)
diff = rand_tensor(input_shape)
out1, grad1 = subgraph_batch_norm(inp, weight, bias, eps, diff)
out2, grad2 = primitive_batch_norm(inp, weight, bias, eps, diff)
_assert_allclose(out1.numpy(), out2.numpy())
_assert_allclose(grad1.numpy(), grad2.numpy())
@functools.lru_cache(maxsize=None)
def _get_mul_fn(dtype, device):
@subgraph_fn(
"Mul",
dtype=dtype,
device=device,
nr_inputs=2,
gopt_level=None,
jit_fusion=False,
custom_grad=True,
)
def mul(inputs, f, c):
x, y = inputs[0:2]
z = f("*", x, y)
(dz,) = yield (z,)
dx = f("*", dz, y)
dy = f("*", dz, x)
yield (dx, dy)
return mul
def test_subgraph_jit_backward():
x_np = np.random.rand(3, 4, 5).astype("float32")
x1 = megengine.Tensor(x_np)
x2 = | megengine.Tensor(x_np) | megengine.Tensor |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = trace(symbolic=symbolic)(subgraph_batch_norm)
primitive_batch_norm = trace(symbolic=symbolic)(primitive_batch_norm)
def rand_tensor(shape, dtype=dtype, device=device):
return megengine.tensor(np.random.random(shape), dtype=dtype, device=device)
    # skip this test because several reduce ops could not run sequentially with the opr cache
return
# test shape change
for image_shape in [(223, 223), (10, 20)]:
ndim = len(image_shape) + 2
input_shape = (batch_size, channels) + image_shape
param_shape = (1, channels) + (1,) * len(image_shape)
inp = rand_tensor(input_shape) * 1e3 + 1e3
weight = rand_tensor(param_shape)
bias = rand_tensor(param_shape)
eps = megengine.tensor(1e-5, dtype=dtype, device=device)
diff = rand_tensor(input_shape)
out1, grad1 = subgraph_batch_norm(inp, weight, bias, eps, diff)
out2, grad2 = primitive_batch_norm(inp, weight, bias, eps, diff)
_assert_allclose(out1.numpy(), out2.numpy())
_assert_allclose(grad1.numpy(), grad2.numpy())
@functools.lru_cache(maxsize=None)
def _get_mul_fn(dtype, device):
@subgraph_fn(
"Mul",
dtype=dtype,
device=device,
nr_inputs=2,
gopt_level=None,
jit_fusion=False,
custom_grad=True,
)
def mul(inputs, f, c):
x, y = inputs[0:2]
z = f("*", x, y)
(dz,) = yield (z,)
dx = f("*", dz, y)
dy = f("*", dz, x)
yield (dx, dy)
return mul
def test_subgraph_jit_backward():
x_np = np.random.rand(3, 4, 5).astype("float32")
x1 = megengine.Tensor(x_np)
x2 = megengine.Tensor(x_np)
mul = _get_mul_fn(x1.dtype, x1.device)
gm = | GradManager() | megengine.autodiff.grad_manager.GradManager |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = trace(symbolic=symbolic)(subgraph_batch_norm)
primitive_batch_norm = trace(symbolic=symbolic)(primitive_batch_norm)
def rand_tensor(shape, dtype=dtype, device=device):
return megengine.tensor(np.random.random(shape), dtype=dtype, device=device)
    # skip this test because several reduce ops could not run sequentially with the opr cache
return
# test shape change
for image_shape in [(223, 223), (10, 20)]:
ndim = len(image_shape) + 2
input_shape = (batch_size, channels) + image_shape
param_shape = (1, channels) + (1,) * len(image_shape)
inp = rand_tensor(input_shape) * 1e3 + 1e3
weight = rand_tensor(param_shape)
bias = rand_tensor(param_shape)
eps = | megengine.tensor(1e-5, dtype=dtype, device=device) | megengine.tensor |
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
@subgraph_fn(
"BatchNormNd",
dtype=dtype,
device=device,
nr_inputs=4,
interpret=interpret,
gopt_level=gopt_level,
)
def batch_norm_nd(inputs, f, c):
input, eps, weight, bias = inputs[0:4]
reduce_shape = c(
(1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
)
input_shape = f(GetVarShape(), input)
input_elems = f(Reduce(mode="product", axis=0), input_shape)
reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
reduce_size = f("//", input_elems, reduce_elems)
reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
channel_mean = f("/", channel_x1s, reduce_size)
channel_var = f(
"-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
)
invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
inv_var_wt = f("*", invsqrt_channel_var, weight)
neg_channel_mean = f("-", channel_mean)
outvar = f(
"fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
)
return (outvar,), (True,)
return batch_norm_nd
@pytest.mark.parametrize("device", [ | get_default_device() | megengine.device.get_default_device |