| prompt | completion | api |
| --- | --- | --- |
| string, 94 – 42.6k chars | string, 6 – 120 chars | string, 14 – 68 chars |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
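# Trace `net` in inference (eval) mode, serialize the traced graph to
# `<fpath>.mge`, and return the network output as a NumPy array so that the
# dumped model can later be compared against this eager-mode reference. The
# two branches cover the old (<= 0.6.0) and the newer MegEngine tracing APIs.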
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
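        # Run one forward pass in training mode first (presumably to populate
        # BatchNorm running statistics), then switch to eval mode before tracing.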
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
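# Hedged usage sketch (illustrative, not part of the original test utilities):
# dump the plain strided/dilated convolution defined above and keep the
# eager-mode result for comparison against the converted model. The output
# file name is arbitrary.
def _demo_dump_normal_conv(fpath="normal_conv_demo"):
    net = ConvOpr("normal")
    return dump_mge_model(net, net.data, fpath=fpath)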
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
        x = F.reshape(x, self.out_shape)  # megengine.functional.reshape
        x = F.reshape(x, self.out_shape1)  # megengine.functional.reshape
        x = F.reshape(x, self.out_shape2)  # megengine.functional.reshape
return x
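# ElemwiseOpr exercises one elementwise mode per instance; the constant
# operands (a scalar, a broadcastable (1, 3, 1, 1) tensor, and a full-size
# (2, 3, 224, 224) tensor) are stored as numpy arrays and wrapped with
# mge.tensor inside forward().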
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max / min
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
        x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")  # megengine.functional.vision.interpolate
        x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")  # megengine.functional.vision.interpolate
return x
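# ActiveOpr looks up an activation by name; "relu6" is expressed as a min/max
# clamp, and fused=True applies the activation to `x + x` so an elementwise op
# precedes the activation in the traced graph.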
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
        return F.broadcast_to(x, (3, 5))  # megengine.functional.broadcast_to
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
        self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)  # megengine.module.Linear
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
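# Reduces along the last axis with sum, mean, or max depending on mode.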
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
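# Bilinearly resizes to 8x8 and then back to the original 3x4 spatial size.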
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
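# Applies an activation selected by name; fused=True feeds x + x into the activation.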
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
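# Broadcasts a single float16 element to a (3, 5) tensor.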
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
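# Adds 1 to an int32 input and casts the result to float32.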
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
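# Small two-class MLP (fc -> bn -> activation, twice, then fc); uses softmax instead of tanh when targeting tflite.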
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
        self.bn0 = M.BatchNorm1d(self.mid_dim)
        self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
        self.bn1 = M.BatchNorm1d(self.mid_dim)
        self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
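# XORNet variant that, per its name, is meant to use leaky_relu activations.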
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
        self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
        self.bn0 = M.BatchNorm1d(self.mid_dim)
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
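# XORNet is a small fully-connected test network; when converter == "tflite" the
# activations are softmax instead of tanh.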
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = | M.Linear(self.mid_dim, self.mid_dim, bias=True) | megengine.module.Linear |
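# A minimal usage sketch (an assumption, not taken from this file): every operator
# module above carries its own input in `.data`, so a converter test presumably
# runs something along the lines of
#     net = ConvOpr("normal")
#     mge_out = dump_mge_model(net, net.data, fpath="test_conv")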
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
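# dump_mge_model runs `net` once on `data`, traces it in symbolic mode and writes
# the serialized graph to fpath + ".mge"; the first branch keeps compatibility with
# the MegEngine <= 0.6.0 trace/dump API, the second uses capture_as_const.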
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
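# SubtensorOpr exercises slicing and integer indexing; with fix_batch=True the
# batch axis is left untouched, otherwise it is sliced and indexed as well.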
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = | M.BatchNorm1d(self.mid_dim) | megengine.module.BatchNorm1d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = | M.Linear(self.mid_dim, self.num_class, bias=True) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.random.random((12, 2)).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = | F.leaky_relu(x) | megengine.functional.leaky_relu |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.random.random((12, 2)).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.leaky_relu(x)
x = | F.leaky_relu(x) | megengine.functional.leaky_relu |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.random.random((12, 2)).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.leaky_relu(x)
x = F.leaky_relu(x)
x = | F.tanh(x) | megengine.functional.tanh |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
            np.random.random(self.tflite_transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
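# Each ElemwiseOpr mode exercises one elementwise pattern, including the fused
# add+relu, mul+add3 and add+sigmoid forms. Sketch, with "elemwise_test" as an
# arbitrary output name:
#     net = ElemwiseOpr("fuse_add_relu")
#     dump_mge_model(net, net.data, "elemwise_test")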
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
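# ReduceOpr reduces over axis 2; any mode other than "sum" or "mean" falls back
# to max. Sketch, with "reduce_test" as an arbitrary output name:
#     net = ReduceOpr("mean")
#     dump_mge_model(net, net.data, "reduce_test")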
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
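# ActiveOpr looks its activation up in str2fun; with fused=True the input is
# first added to itself so the activation sits behind an elementwise add.
# Sketch, with "active_test" as an arbitrary output name:
#     net = ActiveOpr("relu6", fused=True)
#     dump_mge_model(net, net.data, "active_test")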
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
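# TypeCvtOpr adds 1 to its int32 input and casts the sum to float32, e.g.
# (output name "typecvt_test" chosen arbitrarily):
#     net = TypeCvtOpr()
#     dump_mge_model(net, net.data, "typecvt_test")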
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
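# XORNet uses softmax between its blocks when converter="tflite" and tanh
# otherwise. Sketch, with "xornet_test" as an arbitrary output name:
#     net = XORNet(converter="tflite")
#     dump_mge_model(net, net.data, "xornet_test")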
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.random.random((12, 2)).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.leaky_relu(x)
x = F.leaky_relu(x)
x = F.tanh(x)
x = self.fc1(x)
x = | F.leaky_relu(x) | megengine.functional.leaky_relu |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
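# dump_mge_model traces the network symbolically and serializes it to
# fpath + ".mge": the first branch targets MegEngine <= 0.6.0 (trace taking a
# keyword net argument), the second targets newer releases via
# capture_as_const. Sketch, assuming "model_test" as the output name:
#     net = LinearOpr()          # any opr module defined below works here
#     result = dump_mge_model(net, net.data, "model_test")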
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.random.random((12, 2)).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.leaky_relu(x)
x = F.leaky_relu(x)
x = F.tanh(x)
x = self.fc1(x)
x = F.leaky_relu(x)
x = self.bn1(x)
x = | F.tanh(x) | megengine.functional.tanh |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc2(x)
return x
class XORNet_LeakyRelu(M.Module):
def __init__(self):
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.random.random((12, 2)).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.leaky_relu(x)
x = F.leaky_relu(x)
x = F.tanh(x)
x = self.fc1(x)
x = F.leaky_relu(x)
x = self.bn1(x)
x = F.tanh(x)
x = self.fc2(x)
x = | F.leaky_relu(x) | megengine.functional.leaky_relu |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net( | mge.tensor(data) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net( | mge.tensor(data) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference( | mge.tensor(data) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
| M.ConvTranspose2d(5, 3, (3, 3)) | megengine.module.ConvTranspose2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
| M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1) | megengine.module.ConvTranspose2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
| M.ConvTranspose2d(5, 3, (3, 3)) | megengine.module.ConvTranspose2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return | F.remove_axis(a, 0) | megengine.functional.remove_axis |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return | F.squeeze(a, 0) | megengine.functional.squeeze |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return | F.sum(a, axis=2) | megengine.functional.sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = | F.softmax(x) | megengine.functional.softmax |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
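    # MegEngine <= 0.6.0 uses the old-style trace API (explicit inference.trace() and inference.dump());
    # newer releases trace with capture_as_const=True and dump after a warm-up forward pass.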
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else | F.tanh(x) | megengine.functional.tanh |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
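        # forward() dispatches on mode: "normal", "group", "transpose" or "tflite_transpose".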
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
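        # converter="tflite" makes forward() apply softmax between layers instead of tanh.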
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = | F.softmax(x) | megengine.functional.softmax |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
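    # Maps an activation-mode name to the callable applied in forward(); relu6 is built from a min/max clamp.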
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
}
def __init__(self, mode, fused=False):
super().__init__()
self.mode = mode
self.fused = fused
self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0
def forward(self, x):
if self.fused:
return ActiveOpr.str2fun[self.mode](x + x)
else:
return ActiveOpr.str2fun[self.mode](x)
class BroadcastOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([1], dtype=np.float16)
def forward(self, x):
return F.broadcast_to(x, (3, 5))
class TypeCvtOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)
def forward(self, x):
x = x + 1
x = x.astype(np.float32)
return x
class XORNet(M.Module):
def __init__(self, converter="normal"):
self.converter = converter
self.mid_dim = 14
self.num_class = 2
super().__init__()
self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
self.bn0 = M.BatchNorm1d(self.mid_dim)
self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
self.bn1 = M.BatchNorm1d(self.mid_dim)
self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
self.data = np.arange(24).reshape(12, 2).astype(np.float32)
def forward(self, x):
x = self.fc0(x)
x = self.bn0(x)
x = F.softmax(x) if self.converter == "tflite" else F.tanh(x)
x = self.fc1(x)
x = self.bn1(x)
x = F.softmax(x) if self.converter == "tflite" else | F.tanh(x) | megengine.functional.tanh |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
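        # data1 is shaped (N, C, L) for BatchNorm1d; data2 is (N, C, H, W) for BatchNorm2d.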
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + | mge.tensor(self.data1) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
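        # Each mode builds one small elementwise expression; unknown modes raise NotImplementedError.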
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return | F.mean(a, axis=2) | megengine.functional.mean |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
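        # fix_batch=True leaves the leading batch axis untouched; otherwise it is sliced as well.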
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return | F.max(a, axis=2) | megengine.functional.max |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = F.sigmoid(y)
else:
raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
return z
class ReduceOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 1000)).astype(np.float32)
def forward(self, a):
if self.mode == "sum":
return F.sum(a, axis=2)
elif self.mode == "mean":
return F.mean(a, axis=2)
else:
return F.max(a, axis=2)
class ResizeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = [8, 8]
self.out_shape2 = [3, 4]
def forward(self, x):
x = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
x = F.vision.interpolate(x, size=self.out_shape2, mode="bilinear")
return x
class ActiveOpr(M.Module):
str2fun = {
"relu": F.relu,
"tanh": F.tanh,
"sigmoid": F.sigmoid,
"leaky_relu": F.leaky_relu,
"softmax": F.softmax,
"relu6": lambda x: F.maximum( | F.minimum(x, 6) | megengine.functional.minimum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - | mge.tensor(self.data1) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = | mge.tensor(self.data1) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = | F.maximum(x, y) | megengine.functional.maximum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + | mge.tensor(self.data) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + | mge.tensor(self.data2) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = | F.minimum(x, y) | megengine.functional.minimum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + | mge.tensor(self.data) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + | mge.tensor(self.data2) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = | F.ceil(a) | megengine.functional.ceil |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = | F.floor(a) | megengine.functional.floor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = | mge.tensor(self.data1) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / | mge.tensor(self.data1) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = | F.abs(a) | megengine.functional.abs |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = | F.exp(a) | megengine.functional.exp |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = | F.log(a) | megengine.functional.log |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
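        # div: division with tensor and scalar operands in both orders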
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
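        # fuse_add_relu: elementwise add followed by ReLU, the pattern that
        # inference backends typically fuse into a single op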
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
            z = F.relu(y)
        return z
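# Illustrative usage sketch (not part of the original test utilities): dump one
# of the operator modules defined above with the tracing helper from the top of
# this file. The output name "conv_normal" is arbitrary.
if __name__ == "__main__":
    net = ConvOpr("normal")
    ref = dump_mge_model(net, net.data, fpath="conv_normal")
    print("reference output shape:", ref.shape)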
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
# div
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + | mge.tensor(self.data2) | megengine.tensor |
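The operator modules in these prompts are driven by the dump_mge_model helper defined at the top of each one: build a small M.Module, feed it its own self.data, trace it, and dump a .mge graph for the converter tests. A minimal usage sketch under that assumption, reusing LinearOpr and dump_mge_model from the prompt above (the output path "linear" is hypothetical):

net = LinearOpr()                                    # any operator module from the prompt above
out = dump_mge_model(net, net.data, fpath="linear")  # traces the module and writes "linear.mge"
assert out.shape == (10, 200)                        # (10, 100) input -> Linear(100, 200) -> Linear(200, 200)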
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * | mge.tensor(self.data1) | megengine.tensor |
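The elementwise branches rely on NumPy-style broadcasting: self.data1 has shape (1, 3, 1, 1), so it acts as a per-channel scale on an (N, 3, H, W) input. A standalone sketch of the same pattern used by the fuse_mul_add3 branch:

import numpy as np
import megengine as mge

a = mge.tensor(np.ones((2, 3, 4, 4), dtype=np.float32))
scale = mge.tensor(np.full((1, 3, 1, 1), 2.0, dtype=np.float32))
bias = mge.tensor(np.full((2, 3, 4, 4), 0.5, dtype=np.float32))
z = a * scale + bias   # the (1, 3, 1, 1) scale broadcasts over batch and spatial dims -> (2, 3, 4, 4)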
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + | mge.tensor(self.data2) | megengine.tensor |
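SqueezeOpr (like dump_mge_model) branches on mge.__version__ because the axis-removal API was renamed between releases: F.remove_axis up to 0.6.0, F.squeeze afterwards. On a recent MegEngine only the second branch is reachable:

import numpy as np
import megengine as mge
import megengine.functional as F

x = mge.tensor(np.zeros((1, 1, 1000), dtype=np.float32))
y = F.squeeze(x, 0)    # drops the leading axis -> shape (1, 1000)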
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + mge.tensor(self.data2)
z = | F.sigmoid(y) | megengine.functional.sigmoid |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
if mge.__version__ <= "0.6.0":
@trace(symbolic=True)
def inference(data, *, net):
net.eval()
output = net(data)
return output
inference.trace(data, net=net)
mge_result = inference(data, net=net).numpy()
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result
else:
mge_result = net(mge.tensor(data))
net.eval()
mge_result = net(mge.tensor(data))
@trace(symbolic=True, capture_as_const=True)
def inference(data):
net.eval()
output = net(data)
return output
inference(mge.tensor(data))
inference.dump(
fpath + ".mge",
arg_names=["data"],
optimize_for_inference=optimize_for_inference,
)
return mge_result.numpy()
class ConvOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
self.normal_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
)
self.group_conv = M.Conv2d(
3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
)
self.normal_conv.bias = mge.Parameter(
np.random.random(self.normal_conv.bias.shape).astype(np.float32)
)
self.group_conv.bias = mge.Parameter(
np.random.random(self.group_conv.bias.shape).astype(np.float32)
)
self.transpose_conv = M.Sequential(
M.ConvTranspose2d(
3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv = M.Sequential(
M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
M.ConvTranspose2d(5, 3, (3, 3)),
)
self.tflite_transpose_conv[0].bias = mge.Parameter(
np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
)
self.tflite_transpose_conv[1].bias = mge.Parameter(
np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
)
def forward(self, x):
return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((10, 100)).astype(np.float32)
self.linear = M.Linear(100, 200, bias=False)
self.linear_bias = M.Linear(200, 200, bias=True)
self.linear_bias.bias = mge.Parameter(
np.random.random(self.linear_bias.bias.shape).astype(np.float32)
)
def forward(self, x):
x = self.linear(x)
x = self.linear_bias(x)
x = F.relu(x)
return x
class PoolOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)
def forward(self, x):
return getattr(self, self.mode + "pool")(x)
class BnOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.mode = mode
self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
self.bn1d = M.BatchNorm1d(32)
self.bn2d = M.BatchNorm2d(3)
def forward(self, x):
return getattr(self, self.mode)(x)
class SubtensorOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
self.fix_batch = fix_batch
self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)
def forward(self, x):
if self.fix_batch:
x = x[:, 4:8, :, 4:9]
x = x[:, :, 2:7, 3]
else:
x = x[1:3, 4:8, :, 4:9]
x = x[:, :, :, 3]
x = x[1, 1:]
return x
class TransposeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.perm = [0, 2, 3, 1]
def forward(self, x):
return F.transpose(x, self.perm)
class ConcatOpr(M.Module):
def __init__(self):
super().__init__()
self.concat_idx = random.randint(0, 3)
self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)
def forward(self, a):
return F.concat([a, a], self.concat_idx)
class SoftmaxOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1000)).astype(np.float32)
def forward(self, a):
return F.softmax(a)
class SqueezeOpr(M.Module):
def __init__(self):
super().__init__()
self.data = np.random.random((1, 1, 1000)).astype(np.float32)
def forward(self, a):
if mge.__version__ <= "0.6.0":
return F.remove_axis(a, 0) # pylint: disable=no-member
else:
return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
def __init__(self, fix_batch=False):
super().__init__()
if fix_batch:
self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
self.out_shape = (1, 2 * 3, 4)
self.out_shape1 = (1, 2 * 3 * 4)
self.out_shape2 = (1, 2, 3 * 4)
else:
self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
self.out_shape = [1, 2, 3 * 4, 5]
self.out_shape1 = [1 * 2, 3 * 4 * 5]
self.out_shape2 = [1 * 2 * 3, 4 * 5]
def forward(self, x):
x = F.reshape(x, self.out_shape)
x = F.reshape(x, self.out_shape1)
x = F.reshape(x, self.out_shape2)
return x
class ElemwiseOpr(M.Module):
def __init__(self, mode):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
self.mode = mode
def forward(self, a):
# add
if self.mode == "add":
x = a + mge.tensor(np.float32(10))
y = a + mge.tensor(self.data1)
z = x + y
# sub
elif self.mode == "sub":
x = a - mge.tensor(np.float32(10))
y = a - mge.tensor(self.data1)
z = x - y
# mul
elif self.mode == "mul":
x = a * mge.tensor(np.float32(10))
y = mge.tensor(self.data1) * a
z = x * y
        # max
elif self.mode == "max":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.maximum(x, y)
elif self.mode == "min":
x = a + mge.tensor(self.data)
y = a + mge.tensor(self.data2)
z = F.minimum(x, y)
elif self.mode == "pow":
z = a ** 2
elif self.mode == "ceil":
z = F.ceil(a)
elif self.mode == "floor":
z = F.floor(a)
elif self.mode == "div":
y = mge.tensor(self.data1) / a
x = a / mge.tensor(np.float32(2))
z = y / x
# cycle_div
elif self.mode == "cycle_div":
z = a / mge.tensor(self.data1)
# abs
elif self.mode == "abs":
z = F.abs(a)
# exp
elif self.mode == "exp":
z = F.exp(a)
# log
elif self.mode == "log":
z = F.log(a)
elif self.mode == "fuse_add_relu":
y = a + mge.tensor(self.data2)
z = F.relu(y)
elif self.mode == "fuse_mul_add3":
y = a * mge.tensor(self.data1)
z = y + mge.tensor(self.data2)
elif self.mode == "fuse_add_sigmoid":
y = a + | mge.tensor(self.data2) | megengine.tensor |
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = | M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1) | megengine.module.Conv2d |
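The two asserts in RPN.__init__ force every FPN level to use the same number of scales and ratios, so one num_cell_anchors value can size both 1x1 prediction heads. With illustrative config values (the real cfg is not shown in this excerpt):

anchor_scales = [[32], [64], [128], [256], [512]]   # hypothetical: one scale per level
anchor_ratios = [[0.5, 1.0, 2.0]] * 5               # hypothetical: same ratios on every level
num_cell_anchors = len(anchor_scales[0]) * len(anchor_ratios[0])   # 1 * 3 = 3
# rpn_cls_score would then emit 3 channels per location, rpn_bbox_offsets 3 * 4 = 12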
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = F.full((rois.shape[0], 1), bid)
batch_rois = F.concat([batch_inds, rois[:, :4]], axis=1)
return_rois.append(batch_rois)
return_rois = | F.concat(return_rois, axis=0) | megengine.functional.concat |
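The tail of find_top_rpn_proposals assembles ROIs in the layout the comment describes: scores are carried along only to rank and NMS the boxes, then dropped in favor of a batch-index column. Shape sketch with illustrative sizes:

import megengine.functional as F

proposals = F.zeros((4, 4))                                         # 4 boxes kept after NMS
scores = F.ones((4,))
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)         # (4, 5): x1, y1, x2, y2, score
batch_rois = F.concat([F.full((4, 1), 0), rois[:, :4]], axis=1)     # (4, 5): batch_id, x1, y1, x2, y2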
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = F.full((rois.shape[0], 1), bid)
batch_rois = F.concat([batch_inds, rois[:, :4]], axis=1)
return_rois.append(batch_rois)
return_rois = F.concat(return_rois, axis=0)
return return_rois.detach()
def merge_rpn_score_box(self, rpn_cls_score_list, rpn_bbox_offset_list):
final_rpn_cls_score_list = []
final_rpn_bbox_offset_list = []
for bid in range(rpn_cls_score_list[0].shape[0]):
batch_rpn_cls_score_list = []
batch_rpn_bbox_offset_list = []
for i in range(len(self.in_features)):
rpn_cls_scores = rpn_cls_score_list[i][bid].transpose(1, 2, 0).flatten()
rpn_bbox_offsets = (
rpn_bbox_offset_list[i][bid].transpose(2, 3, 0, 1).reshape(-1, 4)
)
batch_rpn_cls_score_list.append(rpn_cls_scores)
batch_rpn_bbox_offset_list.append(rpn_bbox_offsets)
batch_rpn_cls_scores = F.concat(batch_rpn_cls_score_list, axis=0)
batch_rpn_bbox_offsets = F.concat(batch_rpn_bbox_offset_list, axis=0)
final_rpn_cls_score_list.append(batch_rpn_cls_scores)
final_rpn_bbox_offset_list.append(batch_rpn_bbox_offsets)
final_rpn_cls_scores = | F.concat(final_rpn_cls_score_list, axis=0) | megengine.functional.concat |
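merge_rpn_score_box flattens every level with the same transpose pattern used when generating proposals, so element i of the flat score vector lines up with row i of the flattened offsets (and, presumably, with the i-th anchor produced by the generator). Shape sketch with illustrative sizes:

import megengine.functional as F

scores_map = F.zeros((3, 2, 2))                                   # (A, H, W): 3 anchors on a 2x2 map
flat_scores = scores_map.transpose(1, 2, 0).flatten()             # (H * W * A,) = (12,)
offsets_map = F.zeros((3, 4, 2, 2))                               # (A, 4, H, W)
flat_offsets = offsets_map.transpose(2, 3, 0, 1).reshape(-1, 4)   # (12, 4), same row order as flat_scores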
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = F.full((rois.shape[0], 1), bid)
batch_rois = F.concat([batch_inds, rois[:, :4]], axis=1)
return_rois.append(batch_rois)
return_rois = F.concat(return_rois, axis=0)
return return_rois.detach()
def merge_rpn_score_box(self, rpn_cls_score_list, rpn_bbox_offset_list):
final_rpn_cls_score_list = []
final_rpn_bbox_offset_list = []
for bid in range(rpn_cls_score_list[0].shape[0]):
batch_rpn_cls_score_list = []
batch_rpn_bbox_offset_list = []
for i in range(len(self.in_features)):
rpn_cls_scores = rpn_cls_score_list[i][bid].transpose(1, 2, 0).flatten()
rpn_bbox_offsets = (
rpn_bbox_offset_list[i][bid].transpose(2, 3, 0, 1).reshape(-1, 4)
)
batch_rpn_cls_score_list.append(rpn_cls_scores)
batch_rpn_bbox_offset_list.append(rpn_bbox_offsets)
batch_rpn_cls_scores = F.concat(batch_rpn_cls_score_list, axis=0)
batch_rpn_bbox_offsets = F.concat(batch_rpn_bbox_offset_list, axis=0)
final_rpn_cls_score_list.append(batch_rpn_cls_scores)
final_rpn_bbox_offset_list.append(batch_rpn_bbox_offsets)
final_rpn_cls_scores = F.concat(final_rpn_cls_score_list, axis=0)
final_rpn_bbox_offsets = | F.concat(final_rpn_bbox_offset_list, axis=0) | megengine.functional.concat |
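In the training branch, the classification loss is computed over all labeled anchors (label >= 0), while the smooth-L1 box loss is summed over foreground anchors and divided by the number of valid ones, clamped to at least 1 so an image with no labels cannot divide by zero. A small sketch of the masking (the label values below are illustrative):

import numpy as np
import megengine as mge
import megengine.functional as F

rpn_labels = mge.tensor(np.array([1, 0, -1, 1], dtype=np.float32))     # 1 = fg, 0 = bg, -1 = ignore
logits = mge.tensor(np.array([2.0, -1.0, 0.3, 0.7], dtype=np.float32))
valid_mask = rpn_labels >= 0
fg_mask = rpn_labels > 0
num_valid = valid_mask.sum()                                            # 3 labeled anchors
loss_cls = F.loss.binary_cross_entropy(logits[valid_mask], rpn_labels[valid_mask])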
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = F.full((rois.shape[0], 1), bid)
batch_rois = F.concat([batch_inds, rois[:, :4]], axis=1)
return_rois.append(batch_rois)
return_rois = F.concat(return_rois, axis=0)
return return_rois.detach()
def merge_rpn_score_box(self, rpn_cls_score_list, rpn_bbox_offset_list):
final_rpn_cls_score_list = []
final_rpn_bbox_offset_list = []
for bid in range(rpn_cls_score_list[0].shape[0]):
batch_rpn_cls_score_list = []
batch_rpn_bbox_offset_list = []
for i in range(len(self.in_features)):
rpn_cls_scores = rpn_cls_score_list[i][bid].transpose(1, 2, 0).flatten()
rpn_bbox_offsets = (
rpn_bbox_offset_list[i][bid].transpose(2, 3, 0, 1).reshape(-1, 4)
)
batch_rpn_cls_score_list.append(rpn_cls_scores)
batch_rpn_bbox_offset_list.append(rpn_bbox_offsets)
batch_rpn_cls_scores = F.concat(batch_rpn_cls_score_list, axis=0)
batch_rpn_bbox_offsets = F.concat(batch_rpn_bbox_offset_list, axis=0)
final_rpn_cls_score_list.append(batch_rpn_cls_scores)
final_rpn_bbox_offset_list.append(batch_rpn_bbox_offsets)
final_rpn_cls_scores = F.concat(final_rpn_cls_score_list, axis=0)
final_rpn_bbox_offsets = F.concat(final_rpn_bbox_offset_list, axis=0)
return final_rpn_cls_scores, final_rpn_bbox_offsets
def get_ground_truth(self, anchors_list, batched_gt_boxes, batched_num_gts):
anchors = | F.concat(anchors_list, axis=0) | megengine.functional.concat |
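get_ground_truth begins by concatenating the per-level anchors into one (R, 4) tensor so that row i corresponds to element i of the merged per-level predictions. Illustrative shapes (a 2x2 and a 1x1 feature map, 3 anchors per cell):

import megengine.functional as F

anchors_list = [F.zeros((12, 4)), F.zeros((3, 4))]   # hypothetical levels: 2*2*3 and 1*1*3 anchors
anchors = F.concat(anchors_list, axis=0)             # (15, 4), same row order as the merged scores/offsets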
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
| M.init.normal_(l.weight, std=0.01) | megengine.module.init.normal_ |
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
| M.init.fill_(l.bias, 0) | megengine.module.init.fill_ |
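The two completions above are MegEngine's in-place initializers, applied to each prediction head in the loop. The same idiom on a standalone layer (a throwaway example, not part of the RPN):

import megengine.module as M

conv = M.Conv2d(256, 256, kernel_size=3, padding=1)
M.init.normal_(conv.weight, std=0.01)   # Gaussian init, in place
M.init.fill_(conv.bias, 0)              # zero the bias, in place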
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = | F.concat(batch_proposal_list, axis=0) | megengine.functional.concat |
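# A minimal usage sketch for F.concat as completed above, with assumed toy
# tensors; it joins the per-level proposal/score lists along axis 0:
import megengine as mge
import megengine.functional as F
_a = mge.tensor([[1.0, 2.0], [3.0, 4.0]])
_b = mge.tensor([[5.0, 6.0]])
_joined = F.concat([_a, _b], axis=0)  # shape (3, 2): rows of _a followed by rows of _b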
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = | F.concat(batch_score_list, axis=0) | megengine.functional.concat |
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = | F.concat(batch_level_list, axis=0) | megengine.functional.concat |
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = | F.full((rois.shape[0], 1), bid) | megengine.functional.full |
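# A minimal usage sketch for F.full as completed above, with an assumed roi
# count of 4; it builds the batch-index column prepended to every roi:
import megengine.functional as F
_batch_inds = F.full((4, 1), 2)  # (4, 1) tensor in which every entry is batch id 2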
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = F.full((rois.shape[0], 1), bid)
batch_rois = | F.concat([batch_inds, rois[:, :4]], axis=1) | megengine.functional.concat |
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = F.full((rois.shape[0], 1), bid)
batch_rois = F.concat([batch_inds, rois[:, :4]], axis=1)
return_rois.append(batch_rois)
return_rois = F.concat(return_rois, axis=0)
return return_rois.detach()
def merge_rpn_score_box(self, rpn_cls_score_list, rpn_bbox_offset_list):
final_rpn_cls_score_list = []
final_rpn_bbox_offset_list = []
for bid in range(rpn_cls_score_list[0].shape[0]):
batch_rpn_cls_score_list = []
batch_rpn_bbox_offset_list = []
for i in range(len(self.in_features)):
rpn_cls_scores = rpn_cls_score_list[i][bid].transpose(1, 2, 0).flatten()
rpn_bbox_offsets = (
rpn_bbox_offset_list[i][bid].transpose(2, 3, 0, 1).reshape(-1, 4)
)
batch_rpn_cls_score_list.append(rpn_cls_scores)
batch_rpn_bbox_offset_list.append(rpn_bbox_offsets)
batch_rpn_cls_scores = | F.concat(batch_rpn_cls_score_list, axis=0) | megengine.functional.concat |
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = F.full((rois.shape[0], 1), bid)
batch_rois = F.concat([batch_inds, rois[:, :4]], axis=1)
return_rois.append(batch_rois)
return_rois = F.concat(return_rois, axis=0)
return return_rois.detach()
def merge_rpn_score_box(self, rpn_cls_score_list, rpn_bbox_offset_list):
final_rpn_cls_score_list = []
final_rpn_bbox_offset_list = []
for bid in range(rpn_cls_score_list[0].shape[0]):
batch_rpn_cls_score_list = []
batch_rpn_bbox_offset_list = []
for i in range(len(self.in_features)):
rpn_cls_scores = rpn_cls_score_list[i][bid].transpose(1, 2, 0).flatten()
rpn_bbox_offsets = (
rpn_bbox_offset_list[i][bid].transpose(2, 3, 0, 1).reshape(-1, 4)
)
batch_rpn_cls_score_list.append(rpn_cls_scores)
batch_rpn_bbox_offset_list.append(rpn_bbox_offsets)
batch_rpn_cls_scores = F.concat(batch_rpn_cls_score_list, axis=0)
batch_rpn_bbox_offsets = | F.concat(batch_rpn_bbox_offset_list, axis=0) | megengine.functional.concat |
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / | F.maximum(num_valid, 1) | megengine.functional.maximum |
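# A minimal usage sketch for F.maximum as completed above, with an assumed empty
# valid mask; clamping the denominator to at least 1 keeps the loss division safe:
import megengine as mge
import megengine.functional as F
_num_valid = mge.tensor(0)
_denom = F.maximum(_num_valid, 1)  # evaluates to 1, so the normalisation never divides by zero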
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = | F.topk(scores, descending=True, k=prev_nms_top_n) | megengine.functional.topk |
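# A minimal usage sketch for F.topk as completed above, with assumed toy scores;
# descending=True returns the largest values plus their indices, which the RPN
# uses to reorder the matching proposals:
import megengine as mge
import megengine.functional as F
_scores = mge.tensor([0.1, 0.9, 0.4, 0.7])
_vals, _idx = F.topk(_scores, k=2, descending=True)  # _vals ~ [0.9, 0.7], _idx ~ [1, 3]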
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append( | F.full_like(scores, l) | megengine.functional.full_like |
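# A minimal usage sketch for F.full_like as completed above, with assumed toy
# scores; every score of a pyramid level is tagged with that level's index:
import megengine as mge
import megengine.functional as F
_scores = mge.tensor([0.3, 0.8, 0.5])
_levels = F.full_like(_scores, 2)  # [2.0, 2.0, 2.0], same shape and dtype as _scores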
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = F.full((rois.shape[0], 1), bid)
batch_rois = F.concat([batch_inds, rois[:, :4]], axis=1)
return_rois.append(batch_rois)
return_rois = F.concat(return_rois, axis=0)
return return_rois.detach()
def merge_rpn_score_box(self, rpn_cls_score_list, rpn_bbox_offset_list):
final_rpn_cls_score_list = []
final_rpn_bbox_offset_list = []
for bid in range(rpn_cls_score_list[0].shape[0]):
batch_rpn_cls_score_list = []
batch_rpn_bbox_offset_list = []
for i in range(len(self.in_features)):
rpn_cls_scores = rpn_cls_score_list[i][bid].transpose(1, 2, 0).flatten()
rpn_bbox_offsets = (
rpn_bbox_offset_list[i][bid].transpose(2, 3, 0, 1).reshape(-1, 4)
)
batch_rpn_cls_score_list.append(rpn_cls_scores)
batch_rpn_bbox_offset_list.append(rpn_bbox_offsets)
batch_rpn_cls_scores = F.concat(batch_rpn_cls_score_list, axis=0)
batch_rpn_bbox_offsets = F.concat(batch_rpn_bbox_offset_list, axis=0)
final_rpn_cls_score_list.append(batch_rpn_cls_scores)
final_rpn_bbox_offset_list.append(batch_rpn_bbox_offsets)
final_rpn_cls_scores = F.concat(final_rpn_cls_score_list, axis=0)
final_rpn_bbox_offsets = F.concat(final_rpn_bbox_offset_list, axis=0)
return final_rpn_cls_scores, final_rpn_bbox_offsets
def get_ground_truth(self, anchors_list, batched_gt_boxes, batched_num_gts):
anchors = F.concat(anchors_list, axis=0)
labels_list = []
offsets_list = []
for bid in range(batched_gt_boxes.shape[0]):
gt_boxes = batched_gt_boxes[bid, :batched_num_gts[bid]]
overlaps = layers.get_iou(gt_boxes[:, :4], anchors)
matched_indices, labels = self.matcher(overlaps)
offsets = self.box_coder.encode(anchors, gt_boxes[matched_indices, :4])
# sample positive labels
num_positive = int(self.cfg.num_sample_anchors * self.cfg.positive_anchor_ratio)
labels = layers.sample_labels(labels, num_positive, 1, -1)
# sample negative labels
num_positive = (labels == 1).sum().astype("int32")
num_negative = self.cfg.num_sample_anchors - num_positive
labels = layers.sample_labels(labels, num_negative, 0, -1)
labels_list.append(labels)
offsets_list.append(offsets)
return (
| F.concat(labels_list, axis=0) | megengine.functional.concat |
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)
# check anchor settings
assert len(set(len(x) for x in cfg.anchor_scales)) == 1
assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])
rpn_channel = cfg.rpn_channel
self.in_features = cfg.rpn_in_features
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=cfg.anchor_scales,
anchor_ratios=cfg.anchor_ratios,
strides=cfg.rpn_stride,
offset=self.cfg.anchor_offset,
)
self.matcher = layers.Matcher(
cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(
rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
)
self.rpn_bbox_offsets = M.Conv2d(
rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
features = [features[x] for x in self.in_features]
# get anchors
anchors_list = self.anchor_generator(features)
pred_cls_logit_list = []
pred_bbox_offset_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
scores = self.rpn_cls_score(t)
pred_cls_logit_list.append(
scores.reshape(
scores.shape[0],
self.num_cell_anchors,
scores.shape[2],
scores.shape[3],
)
)
bbox_offsets = self.rpn_bbox_offsets(t)
pred_bbox_offset_list.append(
bbox_offsets.reshape(
bbox_offsets.shape[0],
self.num_cell_anchors,
4,
bbox_offsets.shape[2],
bbox_offsets.shape[3],
)
)
# get rois from the predictions
rpn_rois = self.find_top_rpn_proposals(
pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
)
if self.training:
rpn_labels, rpn_offsets = self.get_ground_truth(
anchors_list, boxes, im_info[:, 4].astype("int32")
)
pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
pred_cls_logit_list, pred_bbox_offset_list
)
fg_mask = rpn_labels > 0
valid_mask = rpn_labels >= 0
num_valid = valid_mask.sum()
# rpn classification loss
loss_rpn_cls = F.loss.binary_cross_entropy(
pred_cls_logits[valid_mask], rpn_labels[valid_mask]
)
# rpn regression loss
loss_rpn_bbox = layers.smooth_l1_loss(
pred_bbox_offsets[fg_mask],
rpn_offsets[fg_mask],
self.cfg.rpn_smooth_l1_beta,
).sum() / F.maximum(num_valid, 1)
loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
return rpn_rois, loss_dict
else:
return rpn_rois
def find_top_rpn_proposals(
self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
):
prev_nms_top_n = (
self.cfg.train_prev_nms_top_n
if self.training
else self.cfg.test_prev_nms_top_n
)
post_nms_top_n = (
self.cfg.train_post_nms_top_n
if self.training
else self.cfg.test_post_nms_top_n
)
return_rois = []
for bid in range(im_info.shape[0]):
batch_proposal_list = []
batch_score_list = []
batch_level_list = []
for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
):
# get proposals and scores
offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
proposals = self.box_coder.decode(anchors, offsets)
scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                scores = scores.detach()  # detach returns a new tensor, so keep the result
# prev nms top n
scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
proposals = proposals[order]
batch_proposal_list.append(proposals)
batch_score_list.append(scores)
batch_level_list.append(F.full_like(scores, l))
# gather proposals, scores, level
proposals = F.concat(batch_proposal_list, axis=0)
scores = F.concat(batch_score_list, axis=0)
levels = F.concat(batch_level_list, axis=0)
proposals = layers.get_clipped_boxes(proposals, im_info[bid])
# filter invalid proposals and apply total level nms
keep_mask = layers.filter_boxes(proposals)
proposals = proposals[keep_mask]
scores = scores[keep_mask]
levels = levels[keep_mask]
nms_keep_inds = layers.batched_nms(
proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
)
# generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
rois = rois[nms_keep_inds]
batch_inds = F.full((rois.shape[0], 1), bid)
batch_rois = F.concat([batch_inds, rois[:, :4]], axis=1)
return_rois.append(batch_rois)
return_rois = F.concat(return_rois, axis=0)
return return_rois.detach()
def merge_rpn_score_box(self, rpn_cls_score_list, rpn_bbox_offset_list):
final_rpn_cls_score_list = []
final_rpn_bbox_offset_list = []
for bid in range(rpn_cls_score_list[0].shape[0]):
batch_rpn_cls_score_list = []
batch_rpn_bbox_offset_list = []
for i in range(len(self.in_features)):
rpn_cls_scores = rpn_cls_score_list[i][bid].transpose(1, 2, 0).flatten()
rpn_bbox_offsets = (
rpn_bbox_offset_list[i][bid].transpose(2, 3, 0, 1).reshape(-1, 4)
)
batch_rpn_cls_score_list.append(rpn_cls_scores)
batch_rpn_bbox_offset_list.append(rpn_bbox_offsets)
batch_rpn_cls_scores = F.concat(batch_rpn_cls_score_list, axis=0)
batch_rpn_bbox_offsets = F.concat(batch_rpn_bbox_offset_list, axis=0)
final_rpn_cls_score_list.append(batch_rpn_cls_scores)
final_rpn_bbox_offset_list.append(batch_rpn_bbox_offsets)
final_rpn_cls_scores = F.concat(final_rpn_cls_score_list, axis=0)
final_rpn_bbox_offsets = F.concat(final_rpn_bbox_offset_list, axis=0)
return final_rpn_cls_scores, final_rpn_bbox_offsets
def get_ground_truth(self, anchors_list, batched_gt_boxes, batched_num_gts):
anchors = F.concat(anchors_list, axis=0)
labels_list = []
offsets_list = []
for bid in range(batched_gt_boxes.shape[0]):
gt_boxes = batched_gt_boxes[bid, :batched_num_gts[bid]]
overlaps = layers.get_iou(gt_boxes[:, :4], anchors)
matched_indices, labels = self.matcher(overlaps)
offsets = self.box_coder.encode(anchors, gt_boxes[matched_indices, :4])
# sample positive labels
num_positive = int(self.cfg.num_sample_anchors * self.cfg.positive_anchor_ratio)
labels = layers.sample_labels(labels, num_positive, 1, -1)
# sample negative labels
num_positive = (labels == 1).sum().astype("int32")
num_negative = self.cfg.num_sample_anchors - num_positive
labels = layers.sample_labels(labels, num_negative, 0, -1)
labels_list.append(labels)
offsets_list.append(offsets)
return (
F.concat(labels_list, axis=0).detach(),
| F.concat(offsets_list, axis=0) | megengine.functional.concat |
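# Editor's sketch (hedged; the cfg values below are illustrative, not taken from this repo):
# get_ground_truth above first caps the positives at num_sample_anchors * positive_anchor_ratio,
# then fills the rest of the per-image budget with negatives.
num_sample_anchors, positive_anchor_ratio = 256, 0.5                  # assumed typical RPN settings
num_positive_cap = int(num_sample_anchors * positive_anchor_ratio)    # at most 128 positives
actual_positives = 37                                                 # e.g. positives found after sampling
num_negative = num_sample_anchors - actual_positives                  # 219 negatives fill the budget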
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1))
else:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True))
class FlowEstimatorDense_temp(nn.Module):
def __init__(self, ch_in=64, f_channels=(128, 128, 96, 64, 32, 32), ch_out=2):
super(FlowEstimatorDense_temp, self).__init__()
N = 0
ind = 0
N += ch_in
self.conv1 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv2 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv3 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv4 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv5 = conv(N, f_channels[ind])
N += f_channels[ind]
self.num_feature_channel = N
ind += 1
self.conv_last = conv(N, ch_out, isReLU=False)
def forward(self, x):
x1 = F.concat([self.conv1(x), x], axis=1)
x2 = F.concat([self.conv2(x1), x1], axis=1)
x3 = F.concat([self.conv3(x2), x2], axis=1)
x4 = F.concat([self.conv4(x3), x3], axis=1)
x5 = F.concat([self.conv5(x4), x4], axis=1)
x_out = self.conv_last(x5)
return x5, x_out
class FlowMaskEstimator(FlowEstimatorDense_temp):
def __init__(self, ch_in, f_channels, ch_out):
super(FlowMaskEstimator, self).__init__(ch_in=ch_in, f_channels=f_channels, ch_out=ch_out)
class NeuralUpsampler(nn.Module):
def __init__(self):
super(NeuralUpsampler, self).__init__()
f_channels_es = (32, 32, 32, 16, 8)
in_C = 64
self.dense_estimator_mask = FlowEstimatorDense_temp(in_C, f_channels=f_channels_es, ch_out=3)
self.upsample_output_conv = nn.Sequential(
conv(3, 16, kernel_size=3, stride=1, dilation=1),
conv(16, 16, stride=2),
conv(16, 32, kernel_size=3, stride=1, dilation=1),
conv(32, 32, stride=2),
)
def forward(self, flow_init, feature_1, feature_2, output_level_flow=None):
n, c, h, w = flow_init.shape
n_f, c_f, h_f, w_f = feature_1.shape
if h != h_f or w != w_f:
flow_init = F.vision.interpolate(flow_init, scale_factor=2., mode='bilinear', align_corners=True) * 2
feature_2_warp = flow_warp(feature_2, flow_init)
input_feature = | F.concat((feature_1, feature_2_warp), axis=1) | megengine.functional.concat |
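# Editor's sketch (hedged): the NeuralUpsampler above bilinearly upsamples the incoming flow
# by 2x and also multiplies the flow values by 2, because displacements are measured in pixels
# of the (now twice as large) feature grid. Shapes are illustrative only.
import numpy as np
import megengine.functional as F
from megengine import tensor
flow = tensor(np.random.randn(1, 2, 16, 16).astype("float32"))
flow_up = F.vision.interpolate(flow, scale_factor=2., mode="bilinear", align_corners=True) * 2
print(flow_up.shape)   # (1, 2, 32, 32)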
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1))
else:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True))
class FlowEstimatorDense_temp(nn.Module):
def __init__(self, ch_in=64, f_channels=(128, 128, 96, 64, 32, 32), ch_out=2):
super(FlowEstimatorDense_temp, self).__init__()
N = 0
ind = 0
N += ch_in
self.conv1 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv2 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv3 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv4 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv5 = conv(N, f_channels[ind])
N += f_channels[ind]
self.num_feature_channel = N
ind += 1
self.conv_last = conv(N, ch_out, isReLU=False)
def forward(self, x):
x1 = F.concat([self.conv1(x), x], axis=1)
x2 = F.concat([self.conv2(x1), x1], axis=1)
x3 = F.concat([self.conv3(x2), x2], axis=1)
x4 = F.concat([self.conv4(x3), x3], axis=1)
x5 = F.concat([self.conv5(x4), x4], axis=1)
x_out = self.conv_last(x5)
return x5, x_out
class FlowMaskEstimator(FlowEstimatorDense_temp):
def __init__(self, ch_in, f_channels, ch_out):
super(FlowMaskEstimator, self).__init__(ch_in=ch_in, f_channels=f_channels, ch_out=ch_out)
class NeuralUpsampler(nn.Module):
def __init__(self):
super(NeuralUpsampler, self).__init__()
f_channels_es = (32, 32, 32, 16, 8)
in_C = 64
self.dense_estimator_mask = FlowEstimatorDense_temp(in_C, f_channels=f_channels_es, ch_out=3)
self.upsample_output_conv = nn.Sequential(
conv(3, 16, kernel_size=3, stride=1, dilation=1),
conv(16, 16, stride=2),
conv(16, 32, kernel_size=3, stride=1, dilation=1),
conv(32, 32, stride=2),
)
def forward(self, flow_init, feature_1, feature_2, output_level_flow=None):
n, c, h, w = flow_init.shape
n_f, c_f, h_f, w_f = feature_1.shape
if h != h_f or w != w_f:
flow_init = F.vision.interpolate(flow_init, scale_factor=2., mode='bilinear', align_corners=True) * 2
feature_2_warp = flow_warp(feature_2, flow_init)
input_feature = F.concat((feature_1, feature_2_warp), axis=1)
_, x_out = self.dense_estimator_mask(input_feature)
inter_flow = x_out[:, :2, :, :]
inter_mask = x_out[:, 2, :, :]
inter_mask = | F.expand_dims(inter_mask, 1) | megengine.functional.expand_dims |
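# Editor's sketch (hedged): slicing the third output channel with x_out[:, 2, :, :] drops the
# channel axis, so F.expand_dims restores it before F.sigmoid turns it into a soft blending mask.
import numpy as np
import megengine.functional as F
from megengine import tensor
x_out = tensor(np.random.randn(1, 3, 8, 8).astype("float32"))
inter_mask = x_out[:, 2, :, :]              # shape (1, 8, 8)
inter_mask = F.expand_dims(inter_mask, 1)   # shape (1, 1, 8, 8)
inter_mask = F.sigmoid(inter_mask)          # values in (0, 1)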
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1))
else:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True))
class FlowEstimatorDense_temp(nn.Module):
def __init__(self, ch_in=64, f_channels=(128, 128, 96, 64, 32, 32), ch_out=2):
super(FlowEstimatorDense_temp, self).__init__()
N = 0
ind = 0
N += ch_in
self.conv1 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv2 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv3 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv4 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv5 = conv(N, f_channels[ind])
N += f_channels[ind]
self.num_feature_channel = N
ind += 1
self.conv_last = conv(N, ch_out, isReLU=False)
def forward(self, x):
x1 = F.concat([self.conv1(x), x], axis=1)
x2 = F.concat([self.conv2(x1), x1], axis=1)
x3 = F.concat([self.conv3(x2), x2], axis=1)
x4 = F.concat([self.conv4(x3), x3], axis=1)
x5 = F.concat([self.conv5(x4), x4], axis=1)
x_out = self.conv_last(x5)
return x5, x_out
class FlowMaskEstimator(FlowEstimatorDense_temp):
def __init__(self, ch_in, f_channels, ch_out):
super(FlowMaskEstimator, self).__init__(ch_in=ch_in, f_channels=f_channels, ch_out=ch_out)
class NeuralUpsampler(nn.Module):
def __init__(self):
super(NeuralUpsampler, self).__init__()
f_channels_es = (32, 32, 32, 16, 8)
in_C = 64
self.dense_estimator_mask = FlowEstimatorDense_temp(in_C, f_channels=f_channels_es, ch_out=3)
self.upsample_output_conv = nn.Sequential(
conv(3, 16, kernel_size=3, stride=1, dilation=1),
conv(16, 16, stride=2),
conv(16, 32, kernel_size=3, stride=1, dilation=1),
conv(32, 32, stride=2),
)
def forward(self, flow_init, feature_1, feature_2, output_level_flow=None):
n, c, h, w = flow_init.shape
n_f, c_f, h_f, w_f = feature_1.shape
if h != h_f or w != w_f:
flow_init = F.vision.interpolate(flow_init, scale_factor=2., mode='bilinear', align_corners=True) * 2
feature_2_warp = flow_warp(feature_2, flow_init)
input_feature = F.concat((feature_1, feature_2_warp), axis=1)
_, x_out = self.dense_estimator_mask(input_feature)
inter_flow = x_out[:, :2, :, :]
inter_mask = x_out[:, 2, :, :]
inter_mask = F.expand_dims(inter_mask, 1)
inter_mask = | F.sigmoid(inter_mask) | megengine.functional.sigmoid |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), | nn.LeakyReLU(0.1) | megengine.module.LeakyReLU |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), | nn.InstanceNorm(out_planes, affine=IN_affine) | megengine.module.InstanceNorm |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1))
else:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), | nn.InstanceNorm(out_planes, affine=IN_affine) | megengine.module.InstanceNorm |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1))
else:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True))
class FlowEstimatorDense_temp(nn.Module):
def __init__(self, ch_in=64, f_channels=(128, 128, 96, 64, 32, 32), ch_out=2):
super(FlowEstimatorDense_temp, self).__init__()
N = 0
ind = 0
N += ch_in
self.conv1 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv2 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv3 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv4 = conv(N, f_channels[ind])
N += f_channels[ind]
ind += 1
self.conv5 = conv(N, f_channels[ind])
N += f_channels[ind]
self.num_feature_channel = N
ind += 1
self.conv_last = conv(N, ch_out, isReLU=False)
def forward(self, x):
x1 = F.concat([self.conv1(x), x], axis=1)
x2 = F.concat([self.conv2(x1), x1], axis=1)
x3 = F.concat([self.conv3(x2), x2], axis=1)
x4 = F.concat([self.conv4(x3), x3], axis=1)
x5 = F.concat([self.conv5(x4), x4], axis=1)
x_out = self.conv_last(x5)
return x5, x_out
class FlowMaskEstimator(FlowEstimatorDense_temp):
def __init__(self, ch_in, f_channels, ch_out):
super(FlowMaskEstimator, self).__init__(ch_in=ch_in, f_channels=f_channels, ch_out=ch_out)
class NeuralUpsampler(nn.Module):
def __init__(self):
super(NeuralUpsampler, self).__init__()
f_channels_es = (32, 32, 32, 16, 8)
in_C = 64
self.dense_estimator_mask = FlowEstimatorDense_temp(in_C, f_channels=f_channels_es, ch_out=3)
self.upsample_output_conv = nn.Sequential(
conv(3, 16, kernel_size=3, stride=1, dilation=1),
conv(16, 16, stride=2),
conv(16, 32, kernel_size=3, stride=1, dilation=1),
conv(32, 32, stride=2),
)
def forward(self, flow_init, feature_1, feature_2, output_level_flow=None):
n, c, h, w = flow_init.shape
n_f, c_f, h_f, w_f = feature_1.shape
if h != h_f or w != w_f:
flow_init = | F.vision.interpolate(flow_init, scale_factor=2., mode='bilinear', align_corners=True) | megengine.functional.vision.interpolate |
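# Editor's note (hedged): in FlowEstimatorDense_temp above every conv output is concatenated
# with its own input, so the channel count grows as ch_in + sum(f_channels[:5]); with the
# default widths this gives the 512-channel feature reported in num_feature_channel.
ch_in, f_channels = 64, (128, 128, 96, 64, 32, 32)
num_feature_channel = ch_in + sum(f_channels[:5])
print(num_feature_channel)   # 512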
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), | nn.LeakyReLU(0.1) | megengine.module.LeakyReLU |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), | nn.BatchNorm2d(out_planes, affine=IN_affine) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), | nn.LeakyReLU(0.1) | megengine.module.LeakyReLU |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
if isReLU:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1), nn.BatchNorm2d(out_planes, affine=IN_affine))
else:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.LeakyReLU(0.1))
else:
if if_IN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), nn.InstanceNorm(out_planes, affine=IN_affine))
elif if_BN:
return nn.Sequential(
nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=((kernel_size - 1) * dilation) // 2,
bias=True), | nn.BatchNorm2d(out_planes, affine=IN_affine) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import cv2
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.jit as jit
import numpy as np
from tqdm import tqdm
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
from official.vision.segmentation.utils import import_config_from_file
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-m", "--model_path", type=str, default=None, help="eval model file"
)
args = parser.parse_args()
cfg = import_config_from_file(args.config)
test_loader, test_size = build_dataloader(args.dataset_dir, cfg)
print("number of test images: %d" % (test_size))
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES)
model_dict = | mge.load(args.model_path) | megengine.load |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import cv2
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.jit as jit
import numpy as np
from tqdm import tqdm
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
from official.vision.segmentation.utils import import_config_from_file
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-m", "--model_path", type=str, default=None, help="eval model file"
)
args = parser.parse_args()
cfg = import_config_from_file(args.config)
test_loader, test_size = build_dataloader(args.dataset_dir, cfg)
print("number of test images: %d" % (test_size))
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES)
model_dict = mge.load(args.model_path)
net.load_state_dict(model_dict["state_dict"])
print("load model %s" % (args.model_path))
net.eval()
result_list = []
for sample_batched in tqdm(test_loader):
img = sample_batched[0].squeeze()
label = sample_batched[1].squeeze()
im_info = sample_batched[2]
pred = evaluate(net, img, cfg)
result_list.append({"pred": pred, "gt": label, "name":im_info[2]})
if cfg.VAL_SAVE:
save_results(result_list, cfg.VAL_SAVE, cfg)
compute_metric(result_list, cfg)
## inference one image
def pad_image_to_shape(img, shape, border_mode, value):
margin = np.zeros(4, np.uint32)
pad_height = shape[0] - img.shape[0] if shape[0] - img.shape[0] > 0 else 0
pad_width = shape[1] - img.shape[1] if shape[1] - img.shape[1] > 0 else 0
margin[0] = pad_height // 2
margin[1] = pad_height // 2 + pad_height % 2
margin[2] = pad_width // 2
margin[3] = pad_width // 2 + pad_width % 2
img = cv2.copyMakeBorder(
img, margin[0], margin[1], margin[2], margin[3], border_mode, value=value
)
return img, margin
def eval_single(net, img, is_flip):
@ | jit.trace(symbolic=True, opt_level=2) | megengine.jit.trace |
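# Editor's worked example (hedged numbers): pad_image_to_shape above splits the missing rows
# and columns evenly between the two sides, giving the extra pixel (if any) to bottom/right.
shape, img_shape = (512, 512), (300, 400)
pad_h, pad_w = shape[0] - img_shape[0], shape[1] - img_shape[1]
margin = [pad_h // 2, pad_h // 2 + pad_h % 2, pad_w // 2, pad_w // 2 + pad_w % 2]
print(margin)   # [106, 106, 56, 56]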
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import cv2
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.jit as jit
import numpy as np
from tqdm import tqdm
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
from official.vision.segmentation.utils import import_config_from_file
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-m", "--model_path", type=str, default=None, help="eval model file"
)
args = parser.parse_args()
cfg = import_config_from_file(args.config)
test_loader, test_size = build_dataloader(args.dataset_dir, cfg)
print("number of test images: %d" % (test_size))
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES)
model_dict = mge.load(args.model_path)
net.load_state_dict(model_dict["state_dict"])
print("load model %s" % (args.model_path))
net.eval()
result_list = []
for sample_batched in tqdm(test_loader):
img = sample_batched[0].squeeze()
label = sample_batched[1].squeeze()
im_info = sample_batched[2]
pred = evaluate(net, img, cfg)
result_list.append({"pred": pred, "gt": label, "name":im_info[2]})
if cfg.VAL_SAVE:
save_results(result_list, cfg.VAL_SAVE, cfg)
compute_metric(result_list, cfg)
## inference one image
def pad_image_to_shape(img, shape, border_mode, value):
margin = np.zeros(4, np.uint32)
pad_height = shape[0] - img.shape[0] if shape[0] - img.shape[0] > 0 else 0
pad_width = shape[1] - img.shape[1] if shape[1] - img.shape[1] > 0 else 0
margin[0] = pad_height // 2
margin[1] = pad_height // 2 + pad_height % 2
margin[2] = pad_width // 2
margin[3] = pad_width // 2 + pad_width % 2
img = cv2.copyMakeBorder(
img, margin[0], margin[1], margin[2], margin[3], border_mode, value=value
)
return img, margin
def eval_single(net, img, is_flip):
@jit.trace(symbolic=True, opt_level=2)
def pred_fun(data, net=None):
net.eval()
pred = net(data)
return pred
data = | mge.tensor() | megengine.tensor |
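# Editor's note (hedged; MegEngine 0.x style, as used in this file): an empty mge.tensor()
# placeholder is filled in place with set_value after the HWC image is reordered to NCHW.
import numpy as np
import megengine as mge
img = np.zeros((224, 224, 3), dtype="float32")
data = mge.tensor()
data.set_value(img.transpose(2, 0, 1)[np.newaxis])   # shape becomes (1, 3, 224, 224)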
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import cv2
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.jit as jit
import numpy as np
from tqdm import tqdm
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
from official.vision.segmentation.utils import import_config_from_file
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config", type=str, required=True, help="configuration file"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
)
parser.add_argument(
"-m", "--model_path", type=str, default=None, help="eval model file"
)
args = parser.parse_args()
cfg = import_config_from_file(args.config)
test_loader, test_size = build_dataloader(args.dataset_dir, cfg)
print("number of test images: %d" % (test_size))
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES)
model_dict = mge.load(args.model_path)
net.load_state_dict(model_dict["state_dict"])
print("load model %s" % (args.model_path))
net.eval()
result_list = []
for sample_batched in tqdm(test_loader):
img = sample_batched[0].squeeze()
label = sample_batched[1].squeeze()
im_info = sample_batched[2]
pred = evaluate(net, img, cfg)
result_list.append({"pred": pred, "gt": label, "name":im_info[2]})
if cfg.VAL_SAVE:
save_results(result_list, cfg.VAL_SAVE, cfg)
compute_metric(result_list, cfg)
## inference one image
def pad_image_to_shape(img, shape, border_mode, value):
margin = np.zeros(4, np.uint32)
pad_height = shape[0] - img.shape[0] if shape[0] - img.shape[0] > 0 else 0
pad_width = shape[1] - img.shape[1] if shape[1] - img.shape[1] > 0 else 0
margin[0] = pad_height // 2
margin[1] = pad_height // 2 + pad_height % 2
margin[2] = pad_width // 2
margin[3] = pad_width // 2 + pad_width % 2
img = cv2.copyMakeBorder(
img, margin[0], margin[1], margin[2], margin[3], border_mode, value=value
)
return img, margin
def eval_single(net, img, is_flip):
@jit.trace(symbolic=True, opt_level=2)
def pred_fun(data, net=None):
net.eval()
pred = net(data)
return pred
data = mge.tensor()
data.set_value(img.transpose(2, 0, 1)[np.newaxis])
pred = pred_fun(data, net=net)
if is_flip:
img_flip = img[:, ::-1, :]
data.set_value(img_flip.transpose(2, 0, 1)[np.newaxis])
pred_flip = pred_fun(data, net=net)
pred = (pred + pred_flip[:, :, :, ::-1]) / 2.0
del pred_flip
pred = pred.numpy().squeeze().transpose(1, 2, 0)
del data
return pred
def evaluate(net, img, cfg):
ori_h, ori_w, _ = img.shape
pred_all = np.zeros((ori_h, ori_w, cfg.NUM_CLASSES))
for rate in cfg.VAL_MULTISCALE:
if cfg.VAL_SLIP:
new_h, new_w = int(ori_h*rate), int(ori_w*rate)
val_size = (cfg.VAL_HEIGHT, cfg.VAL_WIDTH)
else:
new_h, new_w = int(cfg.VAL_HEIGHT*rate), int(cfg.VAL_WIDTH*rate)
val_size = (new_h, new_w)
img_scale = cv2.resize(
img, (new_w, new_h), interpolation=cv2.INTER_LINEAR
)
if (new_h <= val_size[0]) and (new_h <= val_size[1]):
img_pad, margin = pad_image_to_shape(
img_scale, val_size, cv2.BORDER_CONSTANT, value=0
)
pred = eval_single(net, img_pad, cfg.VAL_FLIP)
pred = pred[
margin[0] : (pred.shape[0] - margin[1]),
margin[2] : (pred.shape[1] - margin[3]),
:,
]
else:
stride_rate = 2 / 3
stride = [int(np.ceil(i * stride_rate)) for i in val_size]
img_pad, margin = pad_image_to_shape(
img_scale, val_size, cv2.BORDER_CONSTANT, value=0
)
pad_h, pad_w = img_pad.shape[:2]
r_grid, c_grid = [
int(np.ceil((ps - cs) / stride)) + 1
for ps, cs, stride in zip(img_pad.shape, val_size, stride)
]
pred_scale = np.zeros((pad_h, pad_w, cfg.NUM_CLASSES))
count_scale = np.zeros((pad_h, pad_w, cfg.NUM_CLASSES))
for grid_yidx in range(r_grid):
for grid_xidx in range(c_grid):
s_x = grid_xidx * stride[1]
s_y = grid_yidx * stride[0]
e_x = min(s_x + val_size[1], pad_w)
e_y = min(s_y + val_size[0], pad_h)
s_x = e_x - val_size[1]
s_y = e_y - val_size[0]
img_sub = img_pad[s_y:e_y, s_x:e_x, :]
tpred = eval_single(net, img_sub, cfg.VAL_FLIP)
count_scale[s_y:e_y, s_x:e_x, :] += 1
pred_scale[s_y:e_y, s_x:e_x, :] += tpred
#pred_scale = pred_scale / count_scale
pred = pred_scale[
margin[0] : (pred_scale.shape[0] - margin[1]),
margin[2] : (pred_scale.shape[1] - margin[3]),
:,
]
pred = cv2.resize(pred, (ori_w, ori_h), interpolation=cv2.INTER_LINEAR)
pred_all = pred_all + pred
#pred_all = pred_all / len(cfg.VAL_MULTISCALE)
result = np.argmax(pred_all, axis=2).astype(np.uint8)
return result
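# Editor's worked example (hedged, values assumed): for the sliding-window branch of evaluate
# above, a 768x1536 padded image with a 512x512 eval window and stride_rate 2/3 gives a stride
# of 342 pixels and a 2 x 4 grid of overlapping crops.
import numpy as np
val_size, pad_shape = (512, 512), (768, 1536)
stride = [int(np.ceil(s * 2 / 3)) for s in val_size]   # [342, 342]
r_grid, c_grid = [
    int(np.ceil((ps - cs) / st)) + 1 for ps, cs, st in zip(pad_shape, val_size, stride)
]
print(stride, r_grid, c_grid)   # [342, 342] 2 4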
def save_results(result_list, save_dir, cfg):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for idx, sample in enumerate(result_list):
if cfg.DATASET == "Cityscapes":
name = sample["name"].split('/')[-1][:-4]
else:
name = sample["name"]
file_path = os.path.join(save_dir, "%s.png"%name)
cv2.imwrite(file_path, sample["pred"])
file_path = os.path.join(save_dir, "%s.gt.png"%name)
cv2.imwrite(file_path, sample["gt"])
# voc cityscapes metric
def compute_metric(result_list, cfg):
class_num = cfg.NUM_CLASSES
hist = np.zeros((class_num, class_num))
correct = 0
labeled = 0
count = 0
for idx in range(len(result_list)):
pred = result_list[idx]['pred']
gt = result_list[idx]['gt']
assert(pred.shape == gt.shape)
k = (gt>=0) & (gt<class_num)
labeled += np.sum(k)
correct += np.sum((pred[k]==gt[k]))
hist += np.bincount(class_num * gt[k].astype(int) + pred[k].astype(int), minlength=class_num**2).reshape(class_num, class_num)
count += 1
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
mean_IU = np.nanmean(iu)
mean_IU_no_back = np.nanmean(iu[1:])
freq = hist.sum(1) / hist.sum()
freq_IU = (iu[freq > 0] * freq[freq >0]).sum()
mean_pixel_acc = correct / labeled
if cfg.DATASET == "VOC2012":
class_names = ("background", ) + dataset.PascalVOC.class_names
elif cfg.DATASET == "Cityscapes":
class_names = dataset.Cityscapes.class_names
else:
raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
n = iu.size
lines = []
for i in range(n):
if class_names is None:
cls = 'Class %d:' % (i+1)
else:
cls = '%d %s' % (i+1, class_names[i])
lines.append('%-8s\t%.3f%%' % (cls, iu[i] * 100))
lines.append('---------------------------- %-8s\t%.3f%%\t%-8s\t%.3f%%' % ('mean_IU', mean_IU * 100,'mean_pixel_ACC',mean_pixel_acc*100))
line = "\n".join(lines)
print(line)
return mean_IU
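# Editor's mini example (hedged): compute_metric above builds the confusion matrix by encoding
# every (gt, pred) pair as gt * C + pred and bincounting; per-class IoU is then
# diag / (row_sum + col_sum - diag).
import numpy as np
C = 3
gt = np.array([0, 1, 2, 2])
pred = np.array([0, 1, 1, 2])
hist = np.bincount(C * gt + pred, minlength=C ** 2).reshape(C, C)
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
print(iu)   # [1.0, 0.5, 0.5]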
class EvalPascalVOC(dataset.PascalVOC):
def _trans_mask(self, mask):
label = np.ones(mask.shape[:2]) * 255
class_colors = self.class_colors.copy()
class_colors.insert(0, [0,0,0])
for i in range(len(class_colors)):
b, g, r = class_colors[i]
label[
(mask[:, :, 0] == b) & (mask[:, :, 1] == g) & (mask[:, :, 2] == r)
] = i
return label.astype(np.uint8)
def build_dataloader(dataset_dir, cfg):
if cfg.DATASET == "VOC2012":
val_dataset = EvalPascalVOC(
dataset_dir,
"val",
order=["image", "mask", "info"]
)
elif cfg.DATASET == "Cityscapes":
val_dataset = dataset.Cityscapes(
dataset_dir,
"val",
mode='gtFine',
order=["image", "mask", "info"]
)
else:
raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
val_sampler = | data.SequentialSampler(val_dataset, cfg.VAL_BATCHES) | megengine.data.SequentialSampler |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import math
import megengine as mge
import megengine.functional as F
import megengine.module as M
from yolox.utils import bboxes_iou
from .losses import binary_cross_entropy, iou_loss, l1_loss
from .network_blocks import BaseConv, DWConv
def meshgrid(x, y):
"""meshgrid wrapper for megengine"""
assert len(x.shape) == 1
assert len(y.shape) == 1
mesh_shape = (y.shape[0], x.shape[0])
mesh_x = | F.broadcast_to(x, mesh_shape) | megengine.functional.broadcast_to |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import math
import megengine as mge
import megengine.functional as F
import megengine.module as M
from yolox.utils import bboxes_iou
from .losses import binary_cross_entropy, iou_loss, l1_loss
from .network_blocks import BaseConv, DWConv
def meshgrid(x, y):
"""meshgrid wrapper for megengine"""
assert len(x.shape) == 1
assert len(y.shape) == 1
mesh_shape = (y.shape[0], x.shape[0])
mesh_x = F.broadcast_to(x, mesh_shape)
mesh_y = F.broadcast_to(y.reshape(-1, 1), mesh_shape)
return mesh_x, mesh_y
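# Editor's sketch (hedged): the meshgrid wrapper above returns two (len(y), len(x)) grids,
# equivalent to numpy.meshgrid with "xy" indexing; the broadcasting can be checked directly.
import megengine.functional as F
x, y = F.arange(4), F.arange(3)
mesh_x = F.broadcast_to(x, (y.shape[0], x.shape[0]))                   # shape (3, 4)
mesh_y = F.broadcast_to(y.reshape(-1, 1), (y.shape[0], x.shape[0]))    # shape (3, 4)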
class YOLOXHead(M.Module):
def __init__(
self, num_classes, width=1.0, strides=[8, 16, 32, 64],
in_channels=[256, 512, 1024, 1024], act="silu", depthwise=False
):
"""
Args:
act (str): activation type of conv. Default value: "silu".
depthwise (bool): whether to apply depthwise conv in the conv branch. Default value: False.
"""
super().__init__()
self.n_anchors = 1
self.num_classes = num_classes
self.decode_in_inference = True # for deploy, set to False
self.cls_convs = []
self.reg_convs = []
self.cls_preds = []
self.reg_preds = []
self.obj_preds = []
self.stems = []
Conv = DWConv if depthwise else BaseConv
for i in range(len(in_channels)):
self.stems.append(
BaseConv(
in_channels=int(in_channels[i] * width),
out_channels=int(256 * width),
ksize=1,
stride=1,
act=act,
)
)
self.cls_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.reg_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.cls_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * self.num_classes,
kernel_size=1,
stride=1,
padding=0,
)
)
self.reg_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=4,
kernel_size=1,
stride=1,
padding=0,
)
)
self.obj_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * 1,
kernel_size=1,
stride=1,
padding=0,
)
)
self.use_l1 = False
self.strides = strides
self.grids = [F.zeros(1)] * len(in_channels)
self.expanded_strides = [None] * len(in_channels)
def initialize_biases(self, prior_prob):
for conv in self.cls_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
for conv in self.obj_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
def forward(self, xin, labels=None, imgs=None):
outputs = []
origin_preds = []
x_shifts = []
y_shifts = []
expanded_strides = []
for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(
zip(self.cls_convs, self.reg_convs, self.strides, xin)
):
x = self.stems[k](x)
cls_x = x
reg_x = x
cls_feat = cls_conv(cls_x)
cls_output = self.cls_preds[k](cls_feat)
reg_feat = reg_conv(reg_x)
reg_output = self.reg_preds[k](reg_feat)
obj_output = self.obj_preds[k](reg_feat)
if self.training:
output = F.concat([reg_output, obj_output, cls_output], 1)
output, grid = self.get_output_and_grid(output, k, stride_this_level)
x_shifts.append(grid[:, :, 0])
y_shifts.append(grid[:, :, 1])
expanded_strides.append(F.full((1, grid.shape[1]), stride_this_level))
if self.use_l1:
batch_size = reg_output.shape[0]
hsize, wsize = reg_output.shape[-2:]
reg_output = reg_output.reshape(batch_size, self.n_anchors, 4, hsize, wsize)
reg_output = (
F.transpose(reg_output, (0, 1, 3, 4, 2)).reshape(batch_size, -1, 4)
)
origin_preds.append(mge.Tensor(reg_output))
else:
output = F.concat([reg_output, F.sigmoid(obj_output), F.sigmoid(cls_output)], 1)
outputs.append(output)
if self.training:
return self.get_losses(
imgs, x_shifts, y_shifts, expanded_strides,
labels, F.concat(outputs, 1), origin_preds,
)
else:
self.hw = [x.shape[-2:] for x in outputs]
# [batch, n_anchors_all, 85]
outputs = F.concat([F.flatten(x, start_axis=2) for x in outputs], axis=2)
outputs = F.transpose(outputs, (0, 2, 1))
if self.decode_in_inference:
return self.decode_outputs(outputs)
else:
return outputs
def get_output_and_grid(self, output, k, stride):
grid = self.grids[k]
batch_size = output.shape[0]
n_ch = 5 + self.num_classes
hsize, wsize = output.shape[-2:]
if grid.shape[2:4] != output.shape[2:4]:
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, 1, hsize, wsize, 2)
self.grids[k] = grid
output = output.reshape(batch_size, self.n_anchors, n_ch, hsize, wsize)
output = (
F.transpose(output, (0, 1, 3, 4, 2))
.reshape(batch_size, self.n_anchors * hsize * wsize, -1)
)
grid = grid.reshape(1, -1, 2)
output[..., :2] = (output[..., :2] + grid) * stride
output[..., 2:4] = F.exp(output[..., 2:4]) * stride
return output, grid
def decode_outputs(self, outputs):
grids = []
strides = []
for (hsize, wsize), stride in zip(self.hw, self.strides):
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
strides.append(F.full((*shape, 1), stride))
grids = | F.concat(grids, axis=1) | megengine.functional.concat |
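# Editor's numeric sketch (hedged): get_output_and_grid / decode_outputs above recover boxes
# as xy = (pred_xy + grid_cell) * stride and wh = exp(pred_wh) * stride.
import numpy as np
pred_xy, pred_wh = np.array([0.3, -0.2]), np.array([0.1, 0.5])
grid_cell, stride = np.array([7.0, 4.0]), 16
xy = (pred_xy + grid_cell) * stride   # [116.8, 60.8] -> box center in input pixels
wh = np.exp(pred_wh) * stride         # [~17.7, ~26.4] -> box size in input pixels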
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import math
import megengine as mge
import megengine.functional as F
import megengine.module as M
from yolox.utils import bboxes_iou
from .losses import binary_cross_entropy, iou_loss, l1_loss
from .network_blocks import BaseConv, DWConv
def meshgrid(x, y):
"""meshgrid wrapper for megengine"""
assert len(x.shape) == 1
assert len(y.shape) == 1
mesh_shape = (y.shape[0], x.shape[0])
mesh_x = F.broadcast_to(x, mesh_shape)
mesh_y = F.broadcast_to(y.reshape(-1, 1), mesh_shape)
return mesh_x, mesh_y
class YOLOXHead(M.Module):
def __init__(
self, num_classes, width=1.0, strides=[8, 16, 32, 64],
in_channels=[256, 512, 1024, 1024], act="silu", depthwise=False
):
"""
Args:
act (str): activation type of conv. Default value: "silu".
depthwise (bool): whether to apply depthwise conv in the conv branch. Default value: False.
"""
super().__init__()
self.n_anchors = 1
self.num_classes = num_classes
self.decode_in_inference = True # for deploy, set to False
self.cls_convs = []
self.reg_convs = []
self.cls_preds = []
self.reg_preds = []
self.obj_preds = []
self.stems = []
Conv = DWConv if depthwise else BaseConv
for i in range(len(in_channels)):
self.stems.append(
BaseConv(
in_channels=int(in_channels[i] * width),
out_channels=int(256 * width),
ksize=1,
stride=1,
act=act,
)
)
self.cls_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.reg_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.cls_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * self.num_classes,
kernel_size=1,
stride=1,
padding=0,
)
)
self.reg_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=4,
kernel_size=1,
stride=1,
padding=0,
)
)
self.obj_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * 1,
kernel_size=1,
stride=1,
padding=0,
)
)
self.use_l1 = False
self.strides = strides
self.grids = [F.zeros(1)] * len(in_channels)
self.expanded_strides = [None] * len(in_channels)
def initialize_biases(self, prior_prob):
for conv in self.cls_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
for conv in self.obj_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
def forward(self, xin, labels=None, imgs=None):
outputs = []
origin_preds = []
x_shifts = []
y_shifts = []
expanded_strides = []
for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(
zip(self.cls_convs, self.reg_convs, self.strides, xin)
):
x = self.stems[k](x)
cls_x = x
reg_x = x
cls_feat = cls_conv(cls_x)
cls_output = self.cls_preds[k](cls_feat)
reg_feat = reg_conv(reg_x)
reg_output = self.reg_preds[k](reg_feat)
obj_output = self.obj_preds[k](reg_feat)
if self.training:
output = F.concat([reg_output, obj_output, cls_output], 1)
output, grid = self.get_output_and_grid(output, k, stride_this_level)
x_shifts.append(grid[:, :, 0])
y_shifts.append(grid[:, :, 1])
expanded_strides.append(F.full((1, grid.shape[1]), stride_this_level))
if self.use_l1:
batch_size = reg_output.shape[0]
hsize, wsize = reg_output.shape[-2:]
reg_output = reg_output.reshape(batch_size, self.n_anchors, 4, hsize, wsize)
reg_output = (
F.transpose(reg_output, (0, 1, 3, 4, 2)).reshape(batch_size, -1, 4)
)
origin_preds.append(mge.Tensor(reg_output))
else:
output = F.concat([reg_output, F.sigmoid(obj_output), F.sigmoid(cls_output)], 1)
outputs.append(output)
if self.training:
return self.get_losses(
imgs, x_shifts, y_shifts, expanded_strides,
labels, F.concat(outputs, 1), origin_preds,
)
else:
self.hw = [x.shape[-2:] for x in outputs]
# [batch, n_anchors_all, 85]
outputs = F.concat([F.flatten(x, start_axis=2) for x in outputs], axis=2)
outputs = F.transpose(outputs, (0, 2, 1))
if self.decode_in_inference:
return self.decode_outputs(outputs)
else:
return outputs
def get_output_and_grid(self, output, k, stride):
grid = self.grids[k]
batch_size = output.shape[0]
n_ch = 5 + self.num_classes
hsize, wsize = output.shape[-2:]
if grid.shape[2:4] != output.shape[2:4]:
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, 1, hsize, wsize, 2)
self.grids[k] = grid
output = output.reshape(batch_size, self.n_anchors, n_ch, hsize, wsize)
output = (
F.transpose(output, (0, 1, 3, 4, 2))
.reshape(batch_size, self.n_anchors * hsize * wsize, -1)
)
grid = grid.reshape(1, -1, 2)
output[..., :2] = (output[..., :2] + grid) * stride
output[..., 2:4] = F.exp(output[..., 2:4]) * stride
return output, grid
def decode_outputs(self, outputs):
grids = []
strides = []
for (hsize, wsize), stride in zip(self.hw, self.strides):
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
strides.append(F.full((*shape, 1), stride))
grids = F.concat(grids, axis=1)
strides = | F.concat(strides, axis=1) | megengine.functional.concat |
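# Editor's note (hedged): initialize_biases above uses the focal-loss-style prior; with an
# assumed prior_prob of 0.01 the cls/obj bias starts at -log((1 - 0.01) / 0.01) ~ -4.595, so
# every anchor initially predicts a foreground probability of about 0.01 after the sigmoid.
import math
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
print(bias_value, 1 / (1 + math.exp(-bias_value)))   # -4.595..., 0.01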
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import math
import megengine as mge
import megengine.functional as F
import megengine.module as M
from yolox.utils import bboxes_iou
from .losses import binary_cross_entropy, iou_loss, l1_loss
from .network_blocks import BaseConv, DWConv
def meshgrid(x, y):
"""meshgrid wrapper for megengine"""
assert len(x.shape) == 1
assert len(y.shape) == 1
mesh_shape = (y.shape[0], x.shape[0])
mesh_x = F.broadcast_to(x, mesh_shape)
mesh_y = F.broadcast_to(y.reshape(-1, 1), mesh_shape)
return mesh_x, mesh_y
class YOLOXHead(M.Module):
def __init__(
self, num_classes, width=1.0, strides=[8, 16, 32, 64],
in_channels=[256, 512, 1024, 1024], act="silu", depthwise=False
):
"""
Args:
act (str): activation type of conv. Default value: "silu".
depthwise (bool): whether to apply depthwise conv in the conv branch. Default value: False.
"""
super().__init__()
self.n_anchors = 1
self.num_classes = num_classes
self.decode_in_inference = True # for deploy, set to False
self.cls_convs = []
self.reg_convs = []
self.cls_preds = []
self.reg_preds = []
self.obj_preds = []
self.stems = []
Conv = DWConv if depthwise else BaseConv
for i in range(len(in_channels)):
self.stems.append(
BaseConv(
in_channels=int(in_channels[i] * width),
out_channels=int(256 * width),
ksize=1,
stride=1,
act=act,
)
)
self.cls_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.reg_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.cls_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * self.num_classes,
kernel_size=1,
stride=1,
padding=0,
)
)
self.reg_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=4,
kernel_size=1,
stride=1,
padding=0,
)
)
self.obj_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * 1,
kernel_size=1,
stride=1,
padding=0,
)
)
self.use_l1 = False
self.strides = strides
self.grids = [F.zeros(1)] * len(in_channels)
self.expanded_strides = [None] * len(in_channels)
def initialize_biases(self, prior_prob):
for conv in self.cls_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
for conv in self.obj_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
def forward(self, xin, labels=None, imgs=None):
outputs = []
origin_preds = []
x_shifts = []
y_shifts = []
expanded_strides = []
for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(
zip(self.cls_convs, self.reg_convs, self.strides, xin)
):
x = self.stems[k](x)
cls_x = x
reg_x = x
cls_feat = cls_conv(cls_x)
cls_output = self.cls_preds[k](cls_feat)
reg_feat = reg_conv(reg_x)
reg_output = self.reg_preds[k](reg_feat)
obj_output = self.obj_preds[k](reg_feat)
if self.training:
output = F.concat([reg_output, obj_output, cls_output], 1)
output, grid = self.get_output_and_grid(output, k, stride_this_level)
x_shifts.append(grid[:, :, 0])
y_shifts.append(grid[:, :, 1])
expanded_strides.append(F.full((1, grid.shape[1]), stride_this_level))
if self.use_l1:
batch_size = reg_output.shape[0]
hsize, wsize = reg_output.shape[-2:]
reg_output = reg_output.reshape(batch_size, self.n_anchors, 4, hsize, wsize)
reg_output = (
F.transpose(reg_output, (0, 1, 3, 4, 2)).reshape(batch_size, -1, 4)
)
origin_preds.append(mge.Tensor(reg_output))
else:
output = F.concat([reg_output, F.sigmoid(obj_output), F.sigmoid(cls_output)], 1)
outputs.append(output)
if self.training:
return self.get_losses(
imgs, x_shifts, y_shifts, expanded_strides,
labels, F.concat(outputs, 1), origin_preds,
)
else:
self.hw = [x.shape[-2:] for x in outputs]
# [batch, n_anchors_all, 85]
outputs = F.concat([F.flatten(x, start_axis=2) for x in outputs], axis=2)
outputs = F.transpose(outputs, (0, 2, 1))
if self.decode_in_inference:
return self.decode_outputs(outputs)
else:
return outputs
def get_output_and_grid(self, output, k, stride):
grid = self.grids[k]
batch_size = output.shape[0]
n_ch = 5 + self.num_classes
hsize, wsize = output.shape[-2:]
if grid.shape[2:4] != output.shape[2:4]:
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, 1, hsize, wsize, 2)
self.grids[k] = grid
output = output.reshape(batch_size, self.n_anchors, n_ch, hsize, wsize)
output = (
F.transpose(output, (0, 1, 3, 4, 2))
.reshape(batch_size, self.n_anchors * hsize * wsize, -1)
)
grid = grid.reshape(1, -1, 2)
output[..., :2] = (output[..., :2] + grid) * stride
output[..., 2:4] = F.exp(output[..., 2:4]) * stride
return output, grid
def decode_outputs(self, outputs):
grids = []
strides = []
for (hsize, wsize), stride in zip(self.hw, self.strides):
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
strides.append(F.full((*shape, 1), stride))
grids = F.concat(grids, axis=1)
strides = F.concat(strides, axis=1)
outputs[..., :2] = (outputs[..., :2] + grids) * strides
outputs[..., 2:4] = F.exp(outputs[..., 2:4]) * strides
return outputs
def focal_loss_discrite(self, pred, gt):
pos_inds = F.equal(gt, 1).astype("float32")
neg_inds = F.equal(gt, 0).astype("float32")
pos_loss = F.log(pred+1e-5) * F.pow(1-pred, 2) * pos_inds * 0.75
neg_loss = F.log(1-pred+1e-5) * F.pow(pred, 2) * neg_inds * 0.25
loss = -(pos_loss + neg_loss)
return loss
def get_losses(
self, imgs, x_shifts, y_shifts, expanded_strides, labels, outputs, origin_preds,
):
bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]
obj_preds = | F.expand_dims(outputs[:, :, 4], axis=-1) | megengine.functional.expand_dims |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import math
import megengine as mge
import megengine.functional as F
import megengine.module as M
from yolox.utils import bboxes_iou
from .losses import binary_cross_entropy, iou_loss, l1_loss
from .network_blocks import BaseConv, DWConv
def meshgrid(x, y):
"""meshgrid wrapper for megengine"""
assert len(x.shape) == 1
assert len(y.shape) == 1
mesh_shape = (y.shape[0], x.shape[0])
mesh_x = F.broadcast_to(x, mesh_shape)
mesh_y = F.broadcast_to(y.reshape(-1, 1), mesh_shape)
return mesh_x, mesh_y
class YOLOXHead(M.Module):
def __init__(
self, num_classes, width=1.0, strides=[8, 16, 32, 64],
in_channels=[256, 512, 1024, 1024], act="silu", depthwise=False
):
"""
Args:
act (str): activation type of conv. Default value: "silu".
depthwise (bool): whether to apply depthwise conv in the conv branch. Default value: False.
"""
super().__init__()
self.n_anchors = 1
self.num_classes = num_classes
self.decode_in_inference = True # for deploy, set to False
self.cls_convs = []
self.reg_convs = []
self.cls_preds = []
self.reg_preds = []
self.obj_preds = []
self.stems = []
Conv = DWConv if depthwise else BaseConv
for i in range(len(in_channels)):
self.stems.append(
BaseConv(
in_channels=int(in_channels[i] * width),
out_channels=int(256 * width),
ksize=1,
stride=1,
act=act,
)
)
self.cls_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.reg_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.cls_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * self.num_classes,
kernel_size=1,
stride=1,
padding=0,
)
)
self.reg_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=4,
kernel_size=1,
stride=1,
padding=0,
)
)
self.obj_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * 1,
kernel_size=1,
stride=1,
padding=0,
)
)
self.use_l1 = False
self.strides = strides
self.grids = [F.zeros(1)] * len(in_channels)
self.expanded_strides = [None] * len(in_channels)
def initialize_biases(self, prior_prob):
for conv in self.cls_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
for conv in self.obj_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
def forward(self, xin, labels=None, imgs=None):
outputs = []
origin_preds = []
x_shifts = []
y_shifts = []
expanded_strides = []
for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(
zip(self.cls_convs, self.reg_convs, self.strides, xin)
):
x = self.stems[k](x)
cls_x = x
reg_x = x
cls_feat = cls_conv(cls_x)
cls_output = self.cls_preds[k](cls_feat)
reg_feat = reg_conv(reg_x)
reg_output = self.reg_preds[k](reg_feat)
obj_output = self.obj_preds[k](reg_feat)
if self.training:
output = F.concat([reg_output, obj_output, cls_output], 1)
output, grid = self.get_output_and_grid(output, k, stride_this_level)
x_shifts.append(grid[:, :, 0])
y_shifts.append(grid[:, :, 1])
expanded_strides.append(F.full((1, grid.shape[1]), stride_this_level))
if self.use_l1:
batch_size = reg_output.shape[0]
hsize, wsize = reg_output.shape[-2:]
reg_output = reg_output.reshape(batch_size, self.n_anchors, 4, hsize, wsize)
reg_output = (
F.transpose(reg_output, (0, 1, 3, 4, 2)).reshape(batch_size, -1, 4)
)
origin_preds.append(mge.Tensor(reg_output))
else:
output = F.concat([reg_output, F.sigmoid(obj_output), F.sigmoid(cls_output)], 1)
outputs.append(output)
if self.training:
return self.get_losses(
imgs, x_shifts, y_shifts, expanded_strides,
labels, F.concat(outputs, 1), origin_preds,
)
else:
self.hw = [x.shape[-2:] for x in outputs]
# [batch, n_anchors_all, 85]
outputs = F.concat([F.flatten(x, start_axis=2) for x in outputs], axis=2)
outputs = F.transpose(outputs, (0, 2, 1))
if self.decode_in_inference:
return self.decode_outputs(outputs)
else:
return outputs
def get_output_and_grid(self, output, k, stride):
grid = self.grids[k]
batch_size = output.shape[0]
n_ch = 5 + self.num_classes
hsize, wsize = output.shape[-2:]
if grid.shape[2:4] != output.shape[2:4]:
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, 1, hsize, wsize, 2)
self.grids[k] = grid
output = output.reshape(batch_size, self.n_anchors, n_ch, hsize, wsize)
output = (
F.transpose(output, (0, 1, 3, 4, 2))
.reshape(batch_size, self.n_anchors * hsize * wsize, -1)
)
grid = grid.reshape(1, -1, 2)
output[..., :2] = (output[..., :2] + grid) * stride
output[..., 2:4] = F.exp(output[..., 2:4]) * stride
return output, grid
def decode_outputs(self, outputs):
grids = []
strides = []
for (hsize, wsize), stride in zip(self.hw, self.strides):
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
strides.append(F.full((*shape, 1), stride))
grids = F.concat(grids, axis=1)
strides = F.concat(strides, axis=1)
outputs[..., :2] = (outputs[..., :2] + grids) * strides
outputs[..., 2:4] = F.exp(outputs[..., 2:4]) * strides
return outputs
def focal_loss_discrite(self, pred, gt):
pos_inds = F.equal(gt, 1).astype("float32")
neg_inds = F.equal(gt, 0).astype("float32")
pos_loss = F.log(pred+1e-5) * F.pow(1-pred, 2) * pos_inds * 0.75
neg_loss = F.log(1-pred+1e-5) * F.pow(pred, 2) * neg_inds * 0.25
loss = -(pos_loss + neg_loss)
return loss
def get_losses(
self, imgs, x_shifts, y_shifts, expanded_strides, labels, outputs, origin_preds,
):
bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]
obj_preds = F.expand_dims(outputs[:, :, 4], axis=-1) # [batch, n_anchors_all, 1]
cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]
# calculate targets
mixup = labels.shape[2] > 5
if mixup:
label_cut = labels[..., :5]
else:
label_cut = labels
nlabel = (label_cut.sum(axis=2) > 0).sum(axis=1) # number of objects
total_num_anchors = outputs.shape[1]
x_shifts = | F.concat(x_shifts, 1) | megengine.functional.concat |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import math
import megengine as mge
import megengine.functional as F
import megengine.module as M
from yolox.utils import bboxes_iou
from .losses import binary_cross_entropy, iou_loss, l1_loss
from .network_blocks import BaseConv, DWConv
def meshgrid(x, y):
"""meshgrid wrapper for megengine"""
assert len(x.shape) == 1
assert len(y.shape) == 1
mesh_shape = (y.shape[0], x.shape[0])
mesh_x = F.broadcast_to(x, mesh_shape)
mesh_y = F.broadcast_to(y.reshape(-1, 1), mesh_shape)
return mesh_x, mesh_y
class YOLOXHead(M.Module):
def __init__(
self, num_classes, width=1.0, strides=[8, 16, 32, 64],
in_channels=[256, 512, 1024, 1024], act="silu", depthwise=False
):
"""
Args:
act (str): activation type of conv. Default value: "silu".
depthwise (bool): whether to apply depthwise conv in the conv branch. Default value: False.
"""
super().__init__()
self.n_anchors = 1
self.num_classes = num_classes
self.decode_in_inference = True # for deploy, set to False
self.cls_convs = []
self.reg_convs = []
self.cls_preds = []
self.reg_preds = []
self.obj_preds = []
self.stems = []
Conv = DWConv if depthwise else BaseConv
for i in range(len(in_channels)):
self.stems.append(
BaseConv(
in_channels=int(in_channels[i] * width),
out_channels=int(256 * width),
ksize=1,
stride=1,
act=act,
)
)
self.cls_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.reg_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.cls_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * self.num_classes,
kernel_size=1,
stride=1,
padding=0,
)
)
self.reg_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=4,
kernel_size=1,
stride=1,
padding=0,
)
)
self.obj_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * 1,
kernel_size=1,
stride=1,
padding=0,
)
)
self.use_l1 = False
self.strides = strides
self.grids = [F.zeros(1)] * len(in_channels)
self.expanded_strides = [None] * len(in_channels)
def initialize_biases(self, prior_prob):
for conv in self.cls_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
for conv in self.obj_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
def forward(self, xin, labels=None, imgs=None):
outputs = []
origin_preds = []
x_shifts = []
y_shifts = []
expanded_strides = []
for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(
zip(self.cls_convs, self.reg_convs, self.strides, xin)
):
x = self.stems[k](x)
cls_x = x
reg_x = x
cls_feat = cls_conv(cls_x)
cls_output = self.cls_preds[k](cls_feat)
reg_feat = reg_conv(reg_x)
reg_output = self.reg_preds[k](reg_feat)
obj_output = self.obj_preds[k](reg_feat)
if self.training:
output = F.concat([reg_output, obj_output, cls_output], 1)
output, grid = self.get_output_and_grid(output, k, stride_this_level)
x_shifts.append(grid[:, :, 0])
y_shifts.append(grid[:, :, 1])
expanded_strides.append(F.full((1, grid.shape[1]), stride_this_level))
if self.use_l1:
batch_size = reg_output.shape[0]
hsize, wsize = reg_output.shape[-2:]
reg_output = reg_output.reshape(batch_size, self.n_anchors, 4, hsize, wsize)
reg_output = (
F.transpose(reg_output, (0, 1, 3, 4, 2)).reshape(batch_size, -1, 4)
)
origin_preds.append(mge.Tensor(reg_output))
else:
output = F.concat([reg_output, F.sigmoid(obj_output), F.sigmoid(cls_output)], 1)
outputs.append(output)
if self.training:
return self.get_losses(
imgs, x_shifts, y_shifts, expanded_strides,
labels, F.concat(outputs, 1), origin_preds,
)
else:
self.hw = [x.shape[-2:] for x in outputs]
# [batch, n_anchors_all, 85]
outputs = F.concat([F.flatten(x, start_axis=2) for x in outputs], axis=2)
outputs = F.transpose(outputs, (0, 2, 1))
if self.decode_in_inference:
return self.decode_outputs(outputs)
else:
return outputs
def get_output_and_grid(self, output, k, stride):
grid = self.grids[k]
batch_size = output.shape[0]
n_ch = 5 + self.num_classes
hsize, wsize = output.shape[-2:]
if grid.shape[2:4] != output.shape[2:4]:
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, 1, hsize, wsize, 2)
self.grids[k] = grid
output = output.reshape(batch_size, self.n_anchors, n_ch, hsize, wsize)
output = (
F.transpose(output, (0, 1, 3, 4, 2))
.reshape(batch_size, self.n_anchors * hsize * wsize, -1)
)
grid = grid.reshape(1, -1, 2)
output[..., :2] = (output[..., :2] + grid) * stride
output[..., 2:4] = F.exp(output[..., 2:4]) * stride
return output, grid
def decode_outputs(self, outputs):
grids = []
strides = []
for (hsize, wsize), stride in zip(self.hw, self.strides):
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
strides.append(F.full((*shape, 1), stride))
grids = F.concat(grids, axis=1)
strides = F.concat(strides, axis=1)
outputs[..., :2] = (outputs[..., :2] + grids) * strides
outputs[..., 2:4] = F.exp(outputs[..., 2:4]) * strides
return outputs
def focal_loss_discrite(self, pred, gt):
pos_inds = F.equal(gt, 1).astype("float32")
neg_inds = F.equal(gt, 0).astype("float32")
pos_loss = F.log(pred+1e-5) * F.pow(1-pred, 2) * pos_inds * 0.75
neg_loss = F.log(1-pred+1e-5) * F.pow(pred, 2) * neg_inds * 0.25
loss = -(pos_loss + neg_loss)
return loss
def get_losses(
self, imgs, x_shifts, y_shifts, expanded_strides, labels, outputs, origin_preds,
):
bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]
obj_preds = F.expand_dims(outputs[:, :, 4], axis=-1) # [batch, n_anchors_all, 1]
cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]
# calculate targets
mixup = labels.shape[2] > 5
if mixup:
label_cut = labels[..., :5]
else:
label_cut = labels
nlabel = (label_cut.sum(axis=2) > 0).sum(axis=1) # number of objects
total_num_anchors = outputs.shape[1]
x_shifts = F.concat(x_shifts, 1) # [1, n_anchors_all]
y_shifts = | F.concat(y_shifts, 1) | megengine.functional.concat |