Dataset columns and string-length ranges:

    column      type    length (min to max)
    prompt      string  94 to 42.6k chars
    completion  string  6 to 120 chars
    api         string  14 to 68 chars
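Each row pairs a code prefix (prompt) with the next API call to be written (completion) and that call's fully qualified name (api). A minimal sketch of iterating rows with this schema, assuming the Hugging Face `datasets` library; the dataset id below is a hypothetical placeholder, not the actual hub name:

from datasets import load_dataset

# Hypothetical dataset id; substitute the real one.
ds = load_dataset("example/megengine-api-completion", split="train")
for row in ds.select(range(3)):
    # each row holds a code prefix, the expected completion, and the API name
    print(len(row["prompt"]), repr(row["completion"]), row["api"])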
Every prompt is a prefix of one and the same MegEngine tracing test file, cut immediately before the API call that the row's completion supplies. Reconstructed with its original line breaks and indentation, the shared source (up to the longest prefix present in the rows) is:

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp

import numpy as np
import pytest

import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming


@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
    @trace(symbolic=trace_mode)
    def f(x):
        if return_mode == "Tuple":
            return (-x,)
        elif return_mode == "List":
            return [-x]
        elif return_mode == "Dict":
            return {"neg": -x}
        else:
            return -x

    def get_numpy(y):
        if return_mode == "Tuple" or return_mode == "List":
            return y[0].numpy()
        elif return_mode == "Dict":
            return y["neg"].numpy()
        return y.numpy()

    x = tensor([1])
    y = get_numpy(f(x))

    for i in range(3):
        np.testing.assert_equal(get_numpy(f(x)), y)


def test_output_copy_trace():
    class Simple(Module):
        def __init__(self):
            super().__init__()
            self.a = Parameter([1.0], dtype=np.float32)

        def forward(self, x):
            x = x * self.a
            # will result into a copy of output in grad
            x = F.exp(x)
            return x

    ys = {False: [], True: []}

    for symbolic in [False, True]:
        net = Simple()
        gm = GradManager().attach(net.parameters())
        opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
        data = tensor(np.arange(4).reshape(2, 2), dtype="float32")

        @trace(symbolic=symbolic)
        def train_func(d):
            with gm:
                loss = net(d)
                gm.backward(loss)
                opt.step().clear_grad()
            return loss

        for i in range(3):
            y = train_func(data).numpy()
            ys[symbolic].append(y)

    for i in range(3):
        np.testing.assert_equal(ys[False][i], ys[True][i])


@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
    @trace(symbolic=trace_mode)
    def f(x):
        x = -x
        with exclude_from_trace():
            if i % 2:
                x = -x
        x = -x
        return x

    x = tensor([1])

    for i in range(3):
        y = f(x).numpy()
        np.testing.assert_equal(f(x).numpy(), y)


@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
    # explicitly declare opt_level as 2
    @trace(symbolic=trace_mode, opt_level=2)
    def f(a, b):
        base = 0
        c = b - a
        _, idx = F.topk(c, 3)
        # internally, biased_idx will be idx as gopt will ignore the addition
        biased_idx = base + idx
        return biased_idx

    a = tensor(np.ones((7, 2)), dtype=np.int32)
    b = tensor(2 * np.ones((7, 2)), dtype=np.float32)

    for i in range(3):
        y = f(a, b)
        y.numpy()


@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
    w = Parameter(np.ones([4, 6]), dtype="float32")

    gm = GradManager().attach(w)
    opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)

    # explicitly declare opt_level as 2
    @trace(symbolic=trace_mode, opt_level=2)
    def f():
        with gm:
            wm = F.sum(w ** 2, axis=1) ** 0.5
            loss = wm.mean()
            gm.backward(loss)
            opt.step().clear_grad()
        return loss

    for i in range(3):
        y = f()
        y.numpy()


def test_print_in_trace():
    for symbolic in [False]:  # cannot read value in symbolic mode

        @trace(symbolic=symbolic)
        def f(x):
            nonlocal buf
            x = -x
            buf = x.numpy()
            x = -x
            return x

        buf = None
        x = tensor([1])

        for i in range(3):
            y = f(x).numpy()
            z = buf
            buf = None
            np.testing.assert_equal(f(x).numpy(), y)
            np.testing.assert_equal(z, buf)


def test_dump():
    @trace(symbolic=True, capture_as_const=True)
    def f(a, b):
        return a + b

    # prevent from remaining scope from exception test
    AutoNaming.clear()
    a = tensor([2])
    b = tensor([4])
    y = f(a, b).numpy()

    for i in range(3):
        np.testing.assert_equal(f(a, b).numpy(), y)

    file = io.BytesIO()
    dump_info = f.dump(file)
    assert dump_info.nr_opr == 3
    np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
    np.testing.assert_equal(dump_info.outputs, ["ADD"])
    file.seek(0)
    infer_cg = cgtools.GraphInference(file)
    result = list((infer_cg.run(a, b)).values())[0]
    np.testing.assert_equal(result[0], y)


def test_capture_dump():
    a = tensor([2])

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return x * a

    x = tensor([3])
    y = f(x).numpy()

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), y)

    file = io.BytesIO()
    f.dump(file)
    file.seek(0)
    infer_cg = cgtools.GraphInference(file)
    result = list((infer_cg.run(x)).values())[0]
    np.testing.assert_equal(result[0], y)


def test_dump_volatile():
    p = tensor([2])

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return x * p

    x = tensor([3])
    y = f(x).numpy()

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), y)

    file = io.BytesIO()
    f.dump(file, optimize_for_inference=False)
    file.seek(0)
    cg, _, outputs = G.load_graph(file)
    (out,) = outputs
    assert (
        cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
        == "ImmutableTensor"
    )


@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
    @trace(symbolic=trace_mode, profiling=True)
    def f(x):
        return -x

    x = tensor([1])
    y = f(x).numpy()

    f(x)
    f(x)  # XXX: has to run twice

    out = f.get_profile()
    assert out.get("profiler")


def test_goptions():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x):
        # directly return x / x will not trigger gopt
        # since there's no way to tell the two x are the same
        y = 2.0 * x
        return y / y

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x):
        y = 2.0 * x
        return y / y

    d = tensor(0.0)
    assert not np.isfinite(f(d).numpy())
    np.testing.assert_equal(g(d).numpy().item(), 1.0)


def test_goptions_log_sum_exp():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x, y):
        return log(exp(x) + exp(y))

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x, y):
        return log(exp(x) + exp(y))

    val = 1.0e4
    d = tensor(val)
    o = tensor(0.0)
    assert not np.isfinite(f(d, o).numpy())
    np.testing.assert_almost_equal(g(d, o), val)


def test_goptions_log_exp():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x):
        return log(exp(x))

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x):
        return log(exp(x))

    f(tensor(1.0))
    _, out = mkstemp()
    f.dump(out, optimize_for_inference=False)
    *_, outputs = G.load_graph(out)
    oprs_1 = cgtools.get_oprs_seq(outputs)

    g(tensor(1.0))
    g.dump(out, optimize_for_inference=False)
    *_, outputs = G.load_graph(out)
    oprs_2 = cgtools.get_oprs_seq(outputs)

    assert len(oprs_1) - len(oprs_2) == 2


def test_optimize_for_inference():
    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return exp(x)

    _, out = mkstemp()
    f(tensor(5.0))
    f.dump(out, enable_io16xc32=True)

    res = G.load_graph(out)
    computing_input = res.output_vars_list[0].owner.inputs[0]
    assert computing_input.dtype == np.float16


def test_optimize_for_inference_broadcast():
    a = tensor(np.ones(1, dtype=np.float32))

    @trace(capture_as_const=True, symbolic_shape=True)
    def f():
        return a._broadcast(tensor([1, 10], dtype=np.int32))

    f()
    f.dump(io.BytesIO())


def test_trace_cvt_bool():
    x = tensor([0], dtype=np.int32)

    @trace(symbolic=True)
    def f(x):
        a = x.shape
        b = a[0]
        assert isscalar(b)
        return b == 0

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), False)


@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
    x1 = tensor(np.random.randn(2, 10, 10))
    x2 = tensor(np.random.randn(4, 10, 10))
    x3 = tensor(np.random.randn(8, 10, 10))

    @trace(symbolic=trace_mode, capture_as_const=True)
    def f(x):
        y = x.reshape(x.shape[0], 100)
        return y

    f(x1)
    f(x2)
    f(x3)


def test_trace_topk():
    x = tensor([5, 2, 7, 1, 0, 3, 2])

    @trace(symbolic=True)
    def f(x):
        y = F.topk(x, 3)
        np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
        return y

    for i in range(3):
        f(x)


def test_trace_warp_perspective():
    inp_shape = (1, 1, 4, 4)
    x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
    M_shape = (1, 3, 3)
    M = tensor(
        np.array(
            [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
        ).reshape(M_shape)
    )

    @trace(symbolic=True)
    def f(x, M):
        out = F.vision.warp_perspective(x, M, (2, 2))
        np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
        return out

    for i in range(3):
        f(x, M)


def test_raise_on_trace():
    step_count = 0
    catch_count = 0
    bad_step = 10

    class CatchMe(Exception):
        pass

    a = tensor([1, 2, 3, 4])
    b = tensor([5, 6, 7, 8])
    c = tensor([9, 0, 1, 2])

    @trace
    def add_abc(a, b, c):
        ps = a + b
        result = ps + c
        if step_count == bad_step:
            raise CatchMe("catch me")
        return result

    for i in range(100):
        try:
            d = add_abc(a, b, c)
        except CatchMe as e:
            catch_count += 1
        else:
            np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
        step_count += 1

    assert catch_count == 1


@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_broadcast(trace_mode):
    x1 = tensor(np.random.randn(3, 1, 1))
    x2 = tensor(np.random.randn(1, 4, 1))
    x3 = tensor(np.random.randn(1, 1, 5))

    @trace(symbolic=trace_mode, capture_as_const=True)
    def f(x):
        y = F.broadcast_to(x, (3, 4, 5))
        return y

    f(x1)
    f(x2)
    f(x3)


def test_trace_nms():
    def make_inputs(n):
        boxes = np.zeros((n, 4))
        boxes[:, :2] = np.random.rand(n, 2) * 100
        boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100

        scores = np.random.rand(n)

        return tensor(boxes), tensor(scores)

    @trace(symbolic=False)
    def f(boxes, scores):
        # with tracing, max_output must be specified
        results = F.vision.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
        # without tracing, max output can be inferred inside nms
        with exclude_from_trace():
            _ = F.vision.nms(boxes, scores=scores, iou_thresh=0.5)
        return results

    f(*make_inputs(10))
    f(*make_inputs(20))
    f(*make_inputs(30))


def test_trace_valid_broadcast():
    x1 = tensor(np.random.randn(1, 1))
    x2 = tensor(np.random.randn(1, 2))
    shape = (tensor([2]), tensor([2]))

    @trace(symbolic=False)
    def f(x, shape):
        y = F.broadcast_to(x, shape)
        return y

    f(x1, shape)
    f(x2, shape)


def test_clip():
    x = tensor(np.random.randn(10, 10))

    @trace(symbolic=True)
    def f(x, lower, upper):
        y = F.clip(x, lower, upper)
        return y

    for i in range(3):
        f(x, tensor([0]), tensor([1]))


# test returning noncontiguous tensor from trace
def test_slice():
    @trace
    def f(x):
        return x[:, 1::2]

    x = F.arange(8).reshape(2, 4)
    f(x)
    y = f(x)
    np.testing.assert_array_equal(y.numpy(), x.numpy()[:, 1::2])
    y + y


@pytest.mark.parametrize("shape_mode", [False, True])
def test_random(shape_mode):
    def run_test(op):
        @trace(symbolic=True, symbolic_shape=shape_mode)
        def f():
            out = op(size=[10, 10])
            out_shape = out.shape
            assert out_shape is not None
            if not isinstance(out_shape, tuple):
                assert out.shape.numpy() is not None
            return out

        for _ in range(3):
            f()

    run_test(uniform)
    run_test(normal)


@pytest.mark.parametrize("shape_mode", [False, True])
def test_trace_advance_indexing(shape_mode):
    funcs = [
        lambda x, i: x[i],
        # lambda x, i, j: x[i, j],  # FIXME
        lambda x, i, j: x[i, :, j, ...],
        # lambda x, start, end: x[start:end],  # FIXME
        lambda x, start, end: x[:, 0, start:end, ..., 1],
        lambda x, vec: x[vec],
        lambda x, vec: x[vec, ..., 0, 1:3],
        lambda x, vec: x[vec, vec[0], vec[1]],
        # lambda x, i, start, end, vec: x[i, ..., :, vec, start:end],  # FIXME
        lambda x, mask: x[mask],
    ]

    inputs = {
        "x": np.random.randn(5, 5, 5, 5, 5).astype("float32"),
        "i": 0,
        "j": 2,
        "start": 1,
        "end": 3,
        "vec": [1, 2, 3],
        "mask": np.random.randn(5, 5, 5, 5, 5) >= 0,
    }
    for f in funcs:
        sig = inspect.signature(f)
        param_names = list(sig._parameters.keys())
        params = {}
        params_np = {}
        f_traced =
The ten rows, each given by where its prompt cuts the source above, its completion, and its api field (a sketch of regenerating such pairs follows the list):

Row 1
    prompt:     cut at "def run_test(op): @" (inside test_random)
    completion: trace(symbolic=True, symbolic_shape=shape_mode)
    api:        megengine.jit.trace
Row 2
    prompt:     cut at "f_traced =" (inside test_trace_advance_indexing; the longest prefix, i.e. the full listing above)
    completion: trace(f, symbolic=False, symbolic_shape=shape_mode)
    api:        megengine.jit.trace
Row 3
    prompt:     cut at "self.a =" (inside Simple.__init__ in test_output_copy_trace)
    completion: Parameter([1.0], dtype=np.float32)
    api:        megengine.Parameter
Row 4
    prompt:     cut at "x =" before the exp call in Simple.forward
    completion: F.exp(x)
    api:        megengine.functional.exp
Row 5
    prompt:     cut at "with" (inside f in test_exclude_from_trace)
    completion: exclude_from_trace()
    api:        megengine.jit.exclude_from_trace
Row 6
    prompt:     cut at "gm =" (inside test_elemwise_fuse_in_grad)
    completion: GradManager()
    api:        megengine.autodiff.GradManager
Row 7
    prompt:     cut at "return log(" (inside f in test_goptions_log_exp)
    completion: exp(x)
    api:        megengine.functional.exp
Row 8
    prompt:     cut at "return log(" (inside g in test_goptions_log_exp)
    completion: exp(x)
    api:        megengine.functional.exp
Row 9
    prompt:     cut at "return a._broadcast(" (inside test_optimize_for_inference_broadcast)
    completion: tensor([1, 10], dtype=np.int32)
    api:        megengine.tensor
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import inspect import io import itertools from tempfile import mkstemp import numpy as np import pytest import megengine.core.tensor.megbrain_graph as G import megengine.functional as F import megengine.optimizer as optim import megengine.utils.comp_graph_tools as cgtools from megengine import Parameter, tensor from megengine.autodiff import GradManager from megengine.core._trace_option import set_symbolic_shape from megengine.core.ops import builtin as ops from megengine.core.ops.builtin import Elemwise from megengine.core.tensor.utils import isscalar from megengine.functional import exp, log from megengine.jit import exclude_from_trace, trace from megengine.module import Module from megengine.random import normal, uniform from megengine.utils.naming import AutoNaming @pytest.mark.parametrize("trace_mode", [False, True]) @pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"]) def test_trace(trace_mode, return_mode): @trace(symbolic=trace_mode) def f(x): if return_mode == "Tuple": return (-x,) elif return_mode == "List": return [-x] elif return_mode == "Dict": return {"neg": -x} else: return -x def get_numpy(y): if return_mode == "Tuple" or return_mode == "List": return y[0].numpy() elif return_mode == "Dict": return y["neg"].numpy() return y.numpy() x = tensor([1]) y = get_numpy(f(x)) for i in range(3): np.testing.assert_equal(get_numpy(f(x)), y) def test_output_copy_trace(): class Simple(Module): def __init__(self): super().__init__() self.a = Parameter([1.0], dtype=np.float32) def forward(self, x): x = x * self.a # will result into a copy of output in grad x = F.exp(x) return x ys = {False: [], True: []} for symbolic in [False, True]: net = Simple() gm = GradManager().attach(net.parameters()) opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9) data = tensor(np.arange(4).reshape(2, 2), dtype="float32") @trace(symbolic=symbolic) def train_func(d): with gm: loss = net(d) gm.backward(loss) opt.step().clear_grad() return loss for i in range(3): y = train_func(data).numpy() ys[symbolic].append(y) for i in range(3): np.testing.assert_equal(ys[False][i], ys[True][i]) @pytest.mark.parametrize("trace_mode", [False, True]) def test_exclude_from_trace(trace_mode): @trace(symbolic=trace_mode) def f(x): x = -x with exclude_from_trace(): if i % 2: x = -x x = -x return x x = tensor([1]) for i in range(3): y = f(x).numpy() np.testing.assert_equal(f(x).numpy(), y) @pytest.mark.parametrize("trace_mode", [False, True]) def test_elemwise_fuse(trace_mode): # explicitly declare opt_level as 2 @trace(symbolic=trace_mode, opt_level=2) def f(a, b): base = 0 c = b - a _, idx = F.topk(c, 3) # internally, biased_idx will be idx as gopt will ignore the addition biased_idx = base + idx return biased_idx a = tensor(np.ones((7, 2)), dtype=np.int32) b = tensor(2 * np.ones((7, 2)), dtype=np.float32) for i in range(3): y = f(a, b) y.numpy() @pytest.mark.parametrize("trace_mode", [False, True]) def test_elemwise_fuse_in_grad(trace_mode): w = Parameter(np.ones([4, 6]), dtype="float32") gm = GradManager().attach(w) opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4) # explicitly declare opt_level as 2 
    @trace(symbolic=trace_mode, opt_level=2)
    def f():
        with gm:
            wm = F.sum(w ** 2, axis=1) ** 0.5
            loss = wm.mean()
            gm.backward(loss)
        opt.step().clear_grad()
        return loss

    for i in range(3):
        y = f()
        y.numpy()


def test_print_in_trace():
    for symbolic in [False]:  # cannot read value in symbolic mode

        @trace(symbolic=symbolic)
        def f(x):
            nonlocal buf
            x = -x
            buf = x.numpy()
            x = -x
            return x

        buf = None
        x = tensor([1])
        for i in range(3):
            y = f(x).numpy()
            z = buf
            buf = None
            np.testing.assert_equal(f(x).numpy(), y)
            np.testing.assert_equal(z, buf)


def test_dump():
    @trace(symbolic=True, capture_as_const=True)
    def f(a, b):
        return a + b

    # prevent from remaining scope from exception test
    AutoNaming.clear()

    a = tensor([2])
    b = tensor([4])
    y = f(a, b).numpy()

    for i in range(3):
        np.testing.assert_equal(f(a, b).numpy(), y)

    file = io.BytesIO()
    dump_info = f.dump(file)
    assert dump_info.nr_opr == 3
    np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
    np.testing.assert_equal(dump_info.outputs, ["ADD"])

    file.seek(0)
    infer_cg = cgtools.GraphInference(file)
    result = list((infer_cg.run(a, b)).values())[0]
    np.testing.assert_equal(result[0], y)


def test_capture_dump():
    a = tensor([2])

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return x * a

    x = tensor([3])
    y = f(x).numpy()

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), y)

    file = io.BytesIO()
    f.dump(file)
    file.seek(0)
    infer_cg = cgtools.GraphInference(file)
    result = list((infer_cg.run(x)).values())[0]
    np.testing.assert_equal(result[0], y)


def test_dump_volatile():
    p = tensor([2])

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return x * p

    x = tensor([3])
    y = f(x).numpy()

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), y)

    file = io.BytesIO()
    f.dump(file, optimize_for_inference=False)
    file.seek(0)
    cg, _, outputs = G.load_graph(file)
    (out,) = outputs
    assert (
        cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
        == "ImmutableTensor"
    )


@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
    @trace(symbolic=trace_mode, profiling=True)
    def f(x):
        return -x

    x = tensor([1])
    y = f(x).numpy()

    f(x)
    f(x)  # XXX: has to run twice

    out = f.get_profile()
    assert out.get("profiler")


def test_goptions():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x):
        # directly return x / x will not trigger gopt
        # since there's no way to tell the two x are the same
        y = 2.0 * x
        return y / y

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x):
        y = 2.0 * x
        return y / y

    d = tensor(0.0)
    assert not np.isfinite(f(d).numpy())
    np.testing.assert_equal(g(d).numpy().item(), 1.0)


def test_goptions_log_sum_exp():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x, y):
        return log(exp(x) + exp(y))

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x, y):
        return log(exp(x) + exp(y))

    val = 1.0e4
    d = tensor(val)
    o = tensor(0.0)
    assert not np.isfinite(f(d, o).numpy())
    np.testing.assert_almost_equal(g(d, o), val)


def test_goptions_log_exp():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x):
        return log(exp(x))

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x):
        return log(exp(x))

    f(tensor(1.0))
    _, out = mkstemp()
    f.dump(out, optimize_for_inference=False)
    *_, outputs = G.load_graph(out)
    oprs_1 = cgtools.get_oprs_seq(outputs)

    g(tensor(1.0))
    g.dump(out, optimize_for_inference=False)
    *_, outputs = G.load_graph(out)
    oprs_2 = cgtools.get_oprs_seq(outputs)

    assert len(oprs_1) - len(oprs_2) == 2


def test_optimize_for_inference():
    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return exp(x)

    _, out = mkstemp()
    f(tensor(5.0))
    f.dump(out, enable_io16xc32=True)

    res = G.load_graph(out)
    computing_input = res.output_vars_list[0].owner.inputs[0]
    assert computing_input.dtype == np.float16


def test_optimize_for_inference_broadcast():
    a = tensor(np.ones(1, dtype=np.float32))

    @trace(capture_as_const=True, symbolic_shape=True)
    def f():
        return a._broadcast(tensor([1, 10], dtype=np.int32))

    f()
    f.dump(io.BytesIO())


def test_trace_cvt_bool():
    x = tensor([0], dtype=np.int32)

    @trace(symbolic=True)
    def f(x):
        a = x.shape
        b = a[0]
        assert isscalar(b)
        return b == 0

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), False)


@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
    x1 = tensor(np.random.randn(2, 10, 10))
    x2 = tensor(np.random.randn(4, 10, 10))
    x3 = tensor(np.random.randn(8, 10, 10))

    @trace(symbolic=trace_mode, capture_as_const=True)
    def f(x):
        y = x.reshape(x.shape[0], 100)
        return y

    f(x1)
    f(x2)
    f(x3)


def test_trace_topk():
    x = tensor([5, 2, 7, 1, 0, 3, 2])

    @trace(symbolic=True)
    def f(x):
        y = F.topk(x, 3)
        np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
        return y

    for i in range(3):
        f(x)


def test_trace_warp_perspective():
    inp_shape = (1, 1, 4, 4)
    x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
    M_shape = (1, 3, 3)
    M = tensor(
        np.array(
            [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
        ).reshape(M_shape)
    )

    @trace(symbolic=True)
    def f(x, M):
        out = F.vision.warp_perspective(x, M, (2, 2))
        np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
        return out

    for i in range(3):
        f(x, M)


def test_raise_on_trace():
    step_count = 0
    catch_count = 0
    bad_step = 10

    class CatchMe(Exception):
        pass

    a = tensor([1, 2, 3, 4])
    b = tensor([5, 6, 7, 8])
    c = tensor([9, 0, 1, 2])

    @trace
    def add_abc(a, b, c):
        ps = a + b
        result = ps + c
        if step_count == bad_step:
            raise CatchMe("catch me")
        return result

    for i in range(100):
        try:
            d = add_abc(a, b, c)
        except CatchMe as e:
            catch_count += 1
        else:
            np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
        step_count += 1
    assert catch_count == 1


@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_broadcast(trace_mode):
    x1 = tensor(np.random.randn(3, 1, 1))
    x2 = tensor(np.random.randn(1, 4, 1))
    x3 = tensor(np.random.randn(1, 1, 5))

    @trace(symbolic=trace_mode, capture_as_const=True)
    def f(x):
        y = F.broadcast_to(x, (3, 4, 5))
        return y

    f(x1)
    f(x2)
    f(x3)


def test_trace_nms():
    def make_inputs(n):
        boxes = np.zeros((n, 4))
        boxes[:, :2] = np.random.rand(n, 2) * 100
        boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
        scores = np.random.rand(n)
        return
tensor(boxes)
megengine.tensor
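With the rows loaded, a quick tally shows which APIs dominate the call sites; in this slice, megengine.tensor accounts for most rows. A small sketch reusing the hypothetical load_rows helper above:

from collections import Counter

api_counts = Counter(api for _, _, api in load_rows())
for name, count in api_counts.most_common():
    print(f"{count:4d}  {name}")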
tensor(scores)
megengine.tensor
exclude_from_trace()
megengine.jit.exclude_from_trace
F.vision.nms(boxes, scores=scores, iou_thresh=0.5)
megengine.functional.vision.nms
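The api column is a dotted import path, so a row's target can be resolved back to the live callable, for example to check a model's predicted completion against the real function. A sketch under the assumption that megengine is installed; resolve_api is a hypothetical helper, not part of the dataset tooling:

import importlib

def resolve_api(dotted):
    # Import the longest importable module prefix of the dotted path,
    # then descend to the final object by attribute access.
    parts = dotted.split(".")
    for i in range(len(parts), 0, -1):
        try:
            obj = importlib.import_module(".".join(parts[:i]))
        except ImportError:
            continue
        for name in parts[i:]:
            obj = getattr(obj, name)
        return obj
    raise ImportError(f"cannot resolve {dotted!r}")

nms = resolve_api("megengine.functional.vision.nms")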
        return tensor(boxes), tensor(scores)

    @trace(symbolic=False)
    def f(boxes, scores):
        # with tracing, max_output must be specified
        results = F.vision.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
        # without tracing, max output can be inferred inside nms
        with exclude_from_trace():
            _ = F.vision.nms(boxes, scores=scores, iou_thresh=0.5)
        return results

    f(*make_inputs(10))
    f(*make_inputs(20))
    f(*make_inputs(30))


def test_trace_valid_broadcast():
    x1 = tensor(np.random.randn(1, 1))
    x2 = tensor(np.random.randn(1, 2))
    shape = (tensor([2]), tensor([2]))

    @trace(symbolic=False)
    def f(x, shape):
        y = F.broadcast_to(x, shape)
        return y

    f(x1, shape)
    f(x2, shape)


def test_clip():
    x = tensor(np.random.randn(10, 10))

    @trace(symbolic=True)
    def f(x, lower, upper):
        y = F.clip(x, lower, upper)
        return y

    for i in range(3):
        f(x,
tensor([0])
megengine.tensor
completion: tensor([1])
api: megengine.tensor
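For context, this row's blank is the upper bound passed to F.clip in test_clip. A minimal self-contained sketch of the completed call, using only the APIs already shown in the prompt:

    import numpy as np
    from megengine import tensor
    import megengine.functional as F
    from megengine.jit import trace

    @trace(symbolic=True)
    def f(x, lower, upper):
        # bounds are passed as tensors so they can vary between traced calls
        return F.clip(x, lower, upper)

    x = tensor(np.random.randn(10, 10))
    y = f(x, tensor([0]), tensor([1]))  # the blank supplies the upper bound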
prompt: (verbatim repeat of the MegEngine trace-test source above, continuing past the previous blank) ... f(x, tensor([0]), tensor([1])) # test returning noncontiguous tensor from trace def test_slice():
@trace def f(x): return x[:, 1::2] x =
completion: F.arange(8)
api: megengine.functional.arange
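The completed F.arange(8) builds the input of test_slice, which checks that a noncontiguous view can be returned from a traced function. A small sketch of the statement as it appears in full in the next row's prompt:

    import megengine.functional as F

    x = F.arange(8).reshape(2, 4)  # float32 tensor [[0., 1., 2., 3.], [4., 5., 6., 7.]]
    y = x[:, 1::2]                 # noncontiguous result, shape (2, 2)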
prompt: (verbatim repeat of the trace-test source above, up to and including "def test_slice():")
@trace def f(x): return x[:, 1::2] x = F.arange(8).reshape(2, 4) f(x) y = f(x) np.testing.assert_array_equal(y.numpy(), x.numpy()[:, 1::2]) y + y @pytest.mark.parametrize("shape_mode", [False, True]) def test_random(shape_mode): def run_test(op): @trace(symbolic=True, symbolic_shape=shape_mode) def f(): out = op(size=[10, 10]) out_shape = out.shape assert out_shape is not None if not isinstance(out_shape, tuple): assert out.shape.numpy() is not None return out for _ in range(3): f() run_test(uniform) run_test(normal) @pytest.mark.parametrize("shape_mode", [False, True]) def test_trace_advance_indexing(shape_mode): funcs = [ lambda x, i: x[i], # lambda x, i, j: x[i, j], # FIXME lambda x, i, j: x[i, :, j, ...], # lambda x, start, end: x[start:end], # FIXME lambda x, start, end: x[:, 0, start:end, ..., 1], lambda x, vec: x[vec], lambda x, vec: x[vec, ..., 0, 1:3], lambda x, vec: x[vec, vec[0], vec[1]], # lambda x, i, start, end, vec: x[i, ..., :, vec, start:end], # FIXME lambda x, mask: x[mask], ] inputs = { "x": np.random.randn(5, 5, 5, 5, 5).astype("float32"), "i": 0, "j": 2, "start": 1, "end": 3, "vec": [1, 2, 3], "mask": np.random.randn(5, 5, 5, 5, 5) >= 0, } for f in funcs: sig = inspect.signature(f) param_names = list(sig._parameters.keys()) params = {} params_np = {} f_traced = trace(f, symbolic=False, symbolic_shape=shape_mode) for name in param_names: params[name] =
completion: tensor(inputs[name])
api: megengine.tensor
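Here the blank wraps each numpy input in a megengine Tensor before the traced indexing functions run. One of the advanced-indexing patterns from the prompt's funcs list, written out standalone (illustrative only; the test runs it under trace):

    import numpy as np
    from megengine import tensor

    x = tensor(np.random.randn(5, 5, 5, 5, 5).astype("float32"))
    vec = tensor([1, 2, 3])
    y = x[vec, ..., 0, 1:3]  # vector index, ellipsis, scalar index, and slice combined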
prompt: (verbatim repeat of the trace-test source above, truncated inside test_output_copy_trace) ... net = Simple() gm =
completion: GradManager()
api: megengine.autodiff.GradManager
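The completion constructs the autodiff recorder used throughout these tests. A minimal self-contained sketch of the attach/record/backward idiom the prompt relies on:

    import numpy as np
    from megengine import Parameter
    from megengine.autodiff import GradManager

    w = Parameter(np.ones([4, 6]), dtype="float32")
    gm = GradManager().attach([w])  # record ops that involve w
    with gm:
        loss = (w * 2).mean()
        gm.backward(loss)           # populates w.grad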
prompt: (verbatim repeat of the trace-test source above, truncated inside test_elemwise_fuse_in_grad) ... @trace(symbolic=trace_mode, opt_level=2) def f(): with gm: wm =
completion: F.sum(w ** 2, axis=1)
api: megengine.functional.sum
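The completed reduction is the first half of a per-row L2 norm in test_elemwise_fuse_in_grad; the surrounding lines from the prompt read:

    import numpy as np
    from megengine import Parameter
    import megengine.functional as F

    w = Parameter(np.ones([4, 6]), dtype="float32")
    wm = F.sum(w ** 2, axis=1) ** 0.5  # per-row L2 norm, shape (4,)
    loss = wm.mean()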
prompt: (verbatim repeat of the trace-test source above, truncated inside test_dump_volatile) ... (out,) = outputs assert ( cgtools.get_owner_opr_type(
completion: cgtools.get_owner_opr_inputs(out)
api: megengine.utils.comp_graph_tools.get_owner_opr_inputs
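This completion walks the dumped graph: test_dump_volatile loads the serialized graph and checks that the captured tensor p was dumped as a constant. The relevant steps from the prompt (file is the BytesIO produced by f.dump(file, optimize_for_inference=False)):

    import megengine.core.tensor.megbrain_graph as G
    import megengine.utils.comp_graph_tools as cgtools

    file.seek(0)
    cg, _, outputs = G.load_graph(file)
    (out,) = outputs
    inp = cgtools.get_owner_opr_inputs(out)[1]           # second input of the output opr
    assert cgtools.get_owner_opr_type(inp) == "ImmutableTensor"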
prompt: (verbatim repeat of the trace-test source above, truncated inside test_goptions_log_sum_exp) ... def f(x, y): return log(
completion: exp(x)
api: megengine.functional.exp
prompt: (verbatim repeat of the trace-test source above, truncated inside test_goptions_log_sum_exp) ... def f(x, y): return log(exp(x) +
completion: exp(y)
api: megengine.functional.exp
prompt: (verbatim repeat of the trace-test source above, truncated inside test_goptions_log_sum_exp) ... def g(x, y): return log(
completion: exp(x)
api: megengine.functional.exp
prompt: (verbatim repeat of the trace-test source above, truncated inside test_goptions_log_sum_exp) ... def g(x, y): return log(exp(x) +
exp(y)
megengine.functional.exp
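The two traces in this row exist to contrast graph-optimization levels: with opt_level=0 the literal log(exp(x) + exp(y)) is kept, while opt_level=1 lets gopt rewrite it into a numerically stable log-sum-exp. A minimal sketch of the same contrast, reusing only names already imported by the test module (the naive form overflows float32 for large inputs):

.. code-block:: python

    import numpy as np
    from megengine import tensor
    from megengine.functional import exp, log
    from megengine.jit import trace

    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def naive(x, y):
        return log(exp(x) + exp(y))  # overflows float32 for large x, y

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def fused(x, y):
        return log(exp(x) + exp(y))  # gopt may rewrite this to log-sum-exp

    x = tensor(np.array(100.0, dtype=np.float32))
    y = tensor(np.array(101.0, dtype=np.float32))
    print(naive(x, y).numpy())  # inf: exp(100) is not representable in fp32
    print(fused(x, y).numpy())  # ~101.31 when the log-sum-exp rewrite kicks in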
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import re import subprocess import sys import numpy as np import megengine as mge import megengine.functional as F from megengine import jit, tensor from megengine.functional.debug_param import set_conv_execution_strategy from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module from megengine.optimizer import SGD from megengine.test import assertTensorClose def get_gpu_name(): try: gpu_info = subprocess.check_output( ["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"] ) gpu_info = gpu_info.decode("ascii").split("\n")[0] except: gpu_info = "None" return gpu_info def get_cpu_name(): cpu_info = "None" try: cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii") for line in cpu_info.split("\n"): if "model name" in line: return re.sub(".*model name.*:", "", line, 1).strip() except: pass return cpu_info def get_xpu_name(): if
mge.is_cuda_available()
megengine.is_cuda_available
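get_xpu_name dispatches on megengine.is_cuda_available to decide whether the GPU or CPU identifier (and, further down in this file, which reference checkpoint) should be used. A minimal sketch of the same dispatch:

.. code-block:: python

    import megengine as mge

    # Select device-specific reference values, mirroring test_correctness below.
    if mge.is_cuda_available():
        model_name = "mnist_model_with_test.mge"      # references recorded on GPU
    else:
        model_name = "mnist_model_with_test_cpu.mge"  # references recorded on CPU
    print(model_name)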
… def get_xpu_name(): if mge.is_cuda_available(): return get_gpu_name() else: return get_cpu_name() class MnistNet(Module): def __init__(self, has_bn=False): super().__init__() self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True) self.pool0 = AvgPool2d(2) self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True) self.pool1 = AvgPool2d(2) self.fc0 = Linear(20 * 4 * 4, 500, bias=True) self.fc1 = Linear(500, 10, bias=True) self.bn0 = None self.bn1 = None if has_bn: self.bn0 = BatchNorm2d(20) self.bn1 = BatchNorm2d(20) def forward(self, x): x = self.conv0(x) if self.bn0: x = self.bn0(x) x = F.relu(x) x = self.pool0(x) x = self.conv1(x) if self.bn1: x = self.bn1(x) x = F.relu(x) x = self.pool1(x) x = F.flatten(x, 1) x = self.fc0(x) x = F.relu(x) x = self.fc1(x) return x def train(data, label, net, opt): pred = net(data) loss =
F.cross_entropy_with_softmax(pred, label)
megengine.functional.cross_entropy_with_softmax
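The legacy loss helper fuses softmax and cross-entropy in one call; in this 0.x-era API the optimizer, not a GradManager, drives the backward pass, as train() above shows. A small standalone sketch of just the loss call, using the placeholder-plus-set_value idiom this file relies on:

.. code-block:: python

    import numpy as np
    import megengine.functional as F
    from megengine import tensor

    pred = tensor(dtype=np.float32)   # logits placeholder
    label = tensor(dtype=np.int32)    # class-index placeholder
    pred.set_value(np.random.randn(4, 10).astype(np.float32))
    label.set_value(np.array([1, 0, 3, 9], dtype=np.int32))
    # Fused softmax + cross-entropy over the 4-sample batch.
    loss = F.cross_entropy_with_softmax(pred, label)
    print(loss.numpy())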
… def train(data, label, net, opt): pred = net(data) loss = F.cross_entropy_with_softmax(pred, label) opt.backward(loss) return loss def update_model(model_path): """ Update the dumped model with test cases for new reference values. The model with pre-trained weights is trained for one iter with the test data attached. The loss and the updated net state dict are dumped. .. code-block:: python from test_correctness import update_model update_model('mnist_model_with_test.mge') # for gpu update_model('mnist_model_with_test_cpu.mge') # for cpu """ net = MnistNet(has_bn=True) checkpoint =
mge.load(model_path)
megengine.load
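megengine.load deserializes whatever megengine.save wrote, here a plain dict bundling the initial weights, the SGD learning rate, and one batch of test data. A sketch of inspecting such a checkpoint (the key names are the ones used above):

.. code-block:: python

    import megengine as mge

    checkpoint = mge.load("mnist_model_with_test.mge")
    # Keys written by update_model: net_init, sgd_lr, data, label,
    # plus net_updated / loss / xpu once references have been recorded.
    print(sorted(checkpoint.keys()))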
… checkpoint = mge.load(model_path) net.load_state_dict(checkpoint["net_init"]) lr = checkpoint["sgd_lr"] opt = SGD(net.parameters(), lr=lr) data =
tensor(dtype=np.float32)
megengine.tensor
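tensor(dtype=...) with no value is the legacy placeholder idiom: allocate a typed, empty tensor now and fill it later with set_value (current MegEngine instead takes the value directly in the constructor). A minimal sketch:

.. code-block:: python

    import numpy as np
    from megengine import tensor

    data = tensor(dtype=np.float32)  # empty placeholder, dtype fixed up front
    data.set_value(np.zeros((4, 1, 28, 28), dtype=np.float32))
    print(data.shape)                # (4, 1, 28, 28)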
… opt = SGD(net.parameters(), lr=lr) data = tensor(dtype=np.float32) label =
tensor(dtype=np.int32)
megengine.tensor
… data = tensor(dtype=np.float32) label = tensor(dtype=np.int32) data.set_value(checkpoint["data"]) label.set_value(checkpoint["label"]) opt.zero_grad() loss = train(data, label, net=net, opt=opt) opt.step() xpu_name = get_xpu_name() checkpoint.update( {"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name} )
mge.save(checkpoint, model_path)
megengine.save
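The counterpart to megengine.load: update_model mutates the checkpoint dict in memory and megengine.save pickles it back to the same path, so the file carries its own regression references. A sketched round trip (paths as in the docstring above):

.. code-block:: python

    import megengine as mge

    checkpoint = mge.load("mnist_model_with_test.mge")
    # ...train one iteration as in update_model, then record new references:
    checkpoint["xpu"] = get_xpu_name()  # helper defined earlier in this file
    mge.save(checkpoint, "mnist_model_with_test.mge")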
… mge.save(checkpoint, model_path) def run_test(model_path, use_jit, use_symbolic): """ Load the model with test cases and run the training for one iter. The loss and updated weights are compared with the reference values to verify correctness. Dump a new file with updated results by calling update_model if you think the test fails due to numerical rounding errors instead of bugs. Please think twice before you do so. """ net = MnistNet(has_bn=True) checkpoint =
mge.load(model_path)
megengine.load
… net = MnistNet(has_bn=True) checkpoint = mge.load(model_path) net.load_state_dict(checkpoint["net_init"]) lr = checkpoint["sgd_lr"] opt = SGD(net.parameters(), lr=lr) data =
tensor(dtype=np.float32)
megengine.tensor
… data = tensor(dtype=np.float32) label =
tensor(dtype=np.int32)
megengine.tensor
… data.set_value(checkpoint["data"]) label.set_value(checkpoint["label"]) max_err = 1e-5 train_func = train if use_jit: train_func = jit.trace(train_func, symbolic=use_symbolic) opt.zero_grad() loss = train_func(data, label, net=net, opt=opt) opt.step() assertTensorClose(loss.numpy(), checkpoint["loss"], max_err=max_err) for param, param_ref in zip( net.state_dict().items(), checkpoint["net_updated"].items() ): assert param[0] == param_ref[0] assertTensorClose(param[1], param_ref[1], max_err=max_err) def test_correctness(): if
mge.is_cuda_available()
megengine.is_cuda_available
… def test_correctness(): if mge.is_cuda_available(): model_name = "mnist_model_with_test.mge" else: model_name = "mnist_model_with_test_cpu.mge" model_path = os.path.join(os.path.dirname(__file__), model_name)
set_conv_execution_strategy("HEURISTIC_REPRODUCIBLE")
megengine.functional.debug_param.set_conv_execution_strategy
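Pinning the convolution algorithm is what makes a bitwise comparison against stored references meaningful: autotuned or heuristic algorithm picks can change summation order and thus the low-order bits of the loss. A one-line sketch:

.. code-block:: python

    from megengine.functional.debug_param import set_conv_execution_strategy

    # Deterministic algorithm selection, set before the convolutions run,
    # so every run of the test reproduces the recorded loss bit for bit.
    set_conv_execution_strategy("HEURISTIC_REPRODUCIBLE")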
… class MnistNet(Module): def __init__(self, has_bn=False): super().__init__() self.conv0 =
Conv2d(1, 20, kernel_size=5, bias=True)
megengine.module.Conv2d
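The Conv2d/AvgPool2d pairs determine the feature sizes the classifier relies on: a 5x5 kernel without padding shrinks each side by 4, and each pooling halves it. A shape-check sketch of the first stage (the placeholder input assumes MNIST's 28x28 single-channel images):

.. code-block:: python

    import numpy as np
    import megengine.functional as F
    from megengine import tensor
    from megengine.module import AvgPool2d, Conv2d

    conv0 = Conv2d(1, 20, kernel_size=5, bias=True)  # 28x28 -> 24x24
    pool0 = AvgPool2d(2)                             # 24x24 -> 12x12
    x = tensor(dtype=np.float32)
    x.set_value(np.zeros((8, 1, 28, 28), dtype=np.float32))
    print(pool0(F.relu(conv0(x))).shape)             # (8, 20, 12, 12)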
… self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True) self.pool0 =
AvgPool2d(2)
megengine.module.AvgPool2d
… self.pool0 = AvgPool2d(2) self.conv1 =
Conv2d(20, 20, kernel_size=5, bias=True)
megengine.module.Conv2d
… self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True) self.pool1 =
AvgPool2d(2)
megengine.module.AvgPool2d
… self.pool1 = AvgPool2d(2) self.fc0 =
Linear(20 * 4 * 4, 500, bias=True)
megengine.module.Linear
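The 20 * 4 * 4 input width of fc0 is not arbitrary: the spatial side shrinks 28 -> 24 (conv0) -> 12 (pool0) -> 8 (conv1) -> 4 (pool1), and with 20 channels that gives 320 flattened features. A sketch verifying the fully connected stage:

.. code-block:: python

    import numpy as np
    from megengine import tensor
    from megengine.module import Linear

    fc0 = Linear(20 * 4 * 4, 500, bias=True)  # 320 -> 500
    feat = tensor(dtype=np.float32)
    feat.set_value(np.zeros((8, 320), dtype=np.float32))
    print(fc0(feat).shape)                    # (8, 500)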
… self.fc0 = Linear(20 * 4 * 4, 500, bias=True) self.fc1 =
Linear(500, 10, bias=True)
megengine.module.Linear
… def forward(self, x): x = self.conv0(x) if self.bn0: x = self.bn0(x) x =
F.relu(x)
megengine.functional.relu
… x = self.pool0(x) x = self.conv1(x) if self.bn1: x = self.bn1(x) x =
F.relu(x)
megengine.functional.relu
… x = F.relu(x) x = self.pool1(x) x =
F.flatten(x, 1)
megengine.functional.flatten
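F.flatten(x, 1) collapses every axis from 1 onward while keeping the batch axis, which is exactly the bridge from the (N, 20, 4, 4) pooled features to the (N, 320) matrix fc0 expects. Sketch:

.. code-block:: python

    import numpy as np
    import megengine.functional as F
    from megengine import tensor

    x = tensor(dtype=np.float32)
    x.set_value(np.zeros((8, 20, 4, 4), dtype=np.float32))
    print(F.flatten(x, 1).shape)  # (8, 320): dims 1..3 merged, batch kept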
… x = F.flatten(x, 1) x = self.fc0(x) x =
F.relu(x)
megengine.functional.relu
… max_err = 1e-5 train_func = train if use_jit: train_func =
jit.trace(train_func, symbolic=use_symbolic)
megengine.jit.trace
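Unlike the decorator form used in the tracing tests, run_test wraps an existing function with jit.trace(func, symbolic=...) so the very same train step can be run both eagerly and traced, then compared. A toy sketch of that call-style wrapping:

.. code-block:: python

    import numpy as np
    from megengine import jit, tensor

    def double(x):
        return x * 2.0

    traced_double = jit.trace(double, symbolic=True)  # wrap, don't decorate

    inp = tensor(dtype=np.float32)
    inp.set_value(np.ones((2, 2), dtype=np.float32))
    np.testing.assert_equal(double(inp).numpy(), traced_double(inp).numpy())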
… for param, param_ref in zip( net.state_dict().items(), checkpoint["net_updated"].items() ): assert param[0] == param_ref[0]
assertTensorClose(param[1], param_ref[1], max_err=max_err)
megengine.test.assertTensorClose
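assertTensorClose (an old-style helper, imported in the row above from megengine.test) checks that two arrays agree within a tolerance. A small sketch with made-up values:

import numpy as np
from megengine.test import assertTensorClose  # available in older MegEngine releases

a = np.ones((2, 2), dtype="float32")
b = a + 1e-7  # tiny perturbation
assertTensorClose(a, b, max_err=1e-5)  # passes: the deviation is below max_err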
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import re import subprocess import sys import numpy as np import megengine as mge import megengine.functional as F from megengine import jit, tensor from megengine.functional.debug_param import set_conv_execution_strategy from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module from megengine.optimizer import SGD from megengine.test import assertTensorClose def get_gpu_name(): try: gpu_info = subprocess.check_output( ["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"] ) gpu_info = gpu_info.decode("ascii").split("\n")[0] except: gpu_info = "None" return gpu_info def get_cpu_name(): cpu_info = "None" try: cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii") for line in cpu_info.split("\n"): if "model name" in line: return re.sub(".*model name.*:", "", line, 1).strip() except: pass return cpu_info def get_xpu_name(): if mge.is_cuda_available(): return get_gpu_name() else: return get_cpu_name() class MnistNet(Module): def __init__(self, has_bn=False): super().__init__() self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True) self.pool0 = AvgPool2d(2) self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True) self.pool1 = AvgPool2d(2) self.fc0 = Linear(20 * 4 * 4, 500, bias=True) self.fc1 = Linear(500, 10, bias=True) self.bn0 = None self.bn1 = None if has_bn: self.bn0 =
BatchNorm2d(20)
megengine.module.BatchNorm2d
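BatchNorm2d keeps one scale/bias pair and one set of running statistics per channel. A quick sketch of applying it to an NCHW tensor:

import numpy as np
import megengine.module as M
from megengine import tensor

bn = M.BatchNorm2d(20)  # channel count must match dim 1 of the input
x = tensor(np.random.rand(4, 20, 8, 8).astype("float32"))
y = bn(x)  # normalized over N, H, W separately for each of the 20 channels
print(y.shape)  # (4, 20, 8, 8)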
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2020 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import re import subprocess import sys import numpy as np import megengine as mge import megengine.functional as F from megengine import jit, tensor from megengine.functional.debug_param import set_conv_execution_strategy from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module from megengine.optimizer import SGD from megengine.test import assertTensorClose def get_gpu_name(): try: gpu_info = subprocess.check_output( ["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"] ) gpu_info = gpu_info.decode("ascii").split("\n")[0] except: gpu_info = "None" return gpu_info def get_cpu_name(): cpu_info = "None" try: cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii") for line in cpu_info.split("\n"): if "model name" in line: return re.sub(".*model name.*:", "", line, 1).strip() except: pass return cpu_info def get_xpu_name(): if mge.is_cuda_available(): return get_gpu_name() else: return get_cpu_name() class MnistNet(Module): def __init__(self, has_bn=False): super().__init__() self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True) self.pool0 = AvgPool2d(2) self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True) self.pool1 = AvgPool2d(2) self.fc0 = Linear(20 * 4 * 4, 500, bias=True) self.fc1 = Linear(500, 10, bias=True) self.bn0 = None self.bn1 = None if has_bn: self.bn0 = BatchNorm2d(20) self.bn1 =
BatchNorm2d(20)
megengine.module.BatchNorm2d
import math import megengine.module as M import megengine.functional as F import megengine as mge class PositionalEncoding(M.Module): """Positional encoding. :param int d_model: embedding dim :param float dropout_rate: dropout rate :param int max_len: maximum input length """ def __init__(self, d_model, dropout_rate, max_len=5000): """Construct an PositionalEncoding object.""" super(PositionalEncoding, self).__init__() self.d_model = d_model self.xscale = math.sqrt(self.d_model) self.dropout =
M.dropout.Dropout(dropout_rate)
megengine.module.dropout.Dropout
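Dropout zeroes elements at random with the given probability during training and acts as the identity in eval mode (in training, surviving entries are rescaled so the expectation is unchanged). A minimal sketch:

import numpy as np
import megengine.module as M
from megengine import tensor

drop = M.Dropout(0.1)  # drop probability 0.1
x = tensor(np.ones((2, 8), dtype="float32"))
drop.train()
print(drop(x).numpy())  # entries randomly zeroed, survivors rescaled
drop.eval()
print(drop(x).numpy())  # identity at inference time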
import math import megengine.module as M import megengine.functional as F import megengine as mge class PositionalEncoding(M.Module): """Positional encoding. :param int d_model: embedding dim :param float dropout_rate: dropout rate :param int max_len: maximum input length """ def __init__(self, d_model, dropout_rate, max_len=5000): """Construct an PositionalEncoding object.""" super(PositionalEncoding, self).__init__() self.d_model = d_model self.xscale = math.sqrt(self.d_model) self.dropout = M.dropout.Dropout(dropout_rate) self.pe =
mge.Tensor(0.0)
megengine.Tensor
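mge.Tensor(0.0) in the row above creates a 0-dim placeholder so that self.pe exists as a tensor attribute before extend_pe fills it in. Tensor accepts Python scalars, nested lists, and NumPy arrays:

import numpy as np
import megengine as mge

t0 = mge.Tensor(0.0)                       # 0-dim placeholder
t1 = mge.Tensor([[1.0, 2.0], [3.0, 4.0]])  # from a nested list
t2 = mge.Tensor(np.arange(6, dtype="float32").reshape(2, 3))
print(t0.shape, t1.shape, t2.shape)        # shapes: (), (2, 2), (2, 3)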
import math import megengine.module as M import megengine.functional as F import megengine as mge class PositionalEncoding(M.Module): """Positional encoding. :param int d_model: embedding dim :param float dropout_rate: dropout rate :param int max_len: maximum input length """ def __init__(self, d_model, dropout_rate, max_len=5000): """Construct an PositionalEncoding object.""" super(PositionalEncoding, self).__init__() self.d_model = d_model self.xscale = math.sqrt(self.d_model) self.dropout = M.dropout.Dropout(dropout_rate) self.pe = mge.Tensor(0.0) self.extend_pe(F.tensor.zeros([1, max_len])) def extend_pe(self, x): """Reset the positional encodings.""" if len(self.pe.shape): if self.pe.shape[1] >= x.shape[1]: if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return pe =
F.tensor.zeros([x.shape[1], self.d_model])
megengine.functional.tensor.zeros
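F.tensor.zeros is the same function that megengine.functional re-exports at its top level as F.zeros; it allocates a tensor of the given shape filled with zeros:

import megengine.functional as F

pe = F.zeros([5, 4])  # float32 by default
print(pe.shape, pe.sum().numpy())  # (5, 4) 0.0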
import math import megengine.module as M import megengine.functional as F import megengine as mge class PositionalEncoding(M.Module): """Positional encoding. :param int d_model: embedding dim :param float dropout_rate: dropout rate :param int max_len: maximum input length """ def __init__(self, d_model, dropout_rate, max_len=5000): """Construct an PositionalEncoding object.""" super(PositionalEncoding, self).__init__() self.d_model = d_model self.xscale = math.sqrt(self.d_model) self.dropout = M.dropout.Dropout(dropout_rate) self.pe = mge.Tensor(0.0) self.extend_pe(F.tensor.zeros([1, max_len])) def extend_pe(self, x): """Reset the positional encodings.""" if len(self.pe.shape): if self.pe.shape[1] >= x.shape[1]: if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return pe = F.tensor.zeros([x.shape[1], self.d_model]) position = mge.Tensor(F.arange(0, x.shape[1], dtype="float32")).reshape( x.shape[1], -1 ) div_term = F.exp( mge.Tensor(F.arange(0, self.d_model, 2, dtype="float32")) * -(math.log(10000.0) / self.d_model) ) pe[:, 0::2] =
F.sin(position * div_term)
megengine.functional.sin
import math import megengine.module as M import megengine.functional as F import megengine as mge class PositionalEncoding(M.Module): """Positional encoding. :param int d_model: embedding dim :param float dropout_rate: dropout rate :param int max_len: maximum input length """ def __init__(self, d_model, dropout_rate, max_len=5000): """Construct an PositionalEncoding object.""" super(PositionalEncoding, self).__init__() self.d_model = d_model self.xscale = math.sqrt(self.d_model) self.dropout = M.dropout.Dropout(dropout_rate) self.pe = mge.Tensor(0.0) self.extend_pe(F.tensor.zeros([1, max_len])) def extend_pe(self, x): """Reset the positional encodings.""" if len(self.pe.shape): if self.pe.shape[1] >= x.shape[1]: if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return pe = F.tensor.zeros([x.shape[1], self.d_model]) position = mge.Tensor(F.arange(0, x.shape[1], dtype="float32")).reshape( x.shape[1], -1 ) div_term = F.exp( mge.Tensor(F.arange(0, self.d_model, 2, dtype="float32")) * -(math.log(10000.0) / self.d_model) ) pe[:, 0::2] = F.sin(position * div_term) pe[:, 1::2] =
F.cos(position * div_term)
megengine.functional.cos
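Together, the two strided assignments above fill even columns with sines and odd columns with cosines, which is the standard sinusoidal positional-encoding table. A standalone sketch with small illustrative sizes:

import math
import megengine.functional as F

d_model, max_len = 8, 10
position = F.arange(0, max_len, dtype="float32").reshape(max_len, 1)
div_term = F.exp(
    F.arange(0, d_model, 2, dtype="float32") * -(math.log(10000.0) / d_model)
)
pe = F.zeros([max_len, d_model])
pe[:, 0::2] = F.sin(position * div_term)  # even columns
pe[:, 1::2] = F.cos(position * div_term)  # odd columns
print(pe.shape)  # (10, 8)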
import math import megengine.module as M import megengine.functional as F import megengine as mge class PositionalEncoding(M.Module): """Positional encoding. :param int d_model: embedding dim :param float dropout_rate: dropout rate :param int max_len: maximum input length """ def __init__(self, d_model, dropout_rate, max_len=5000): """Construct an PositionalEncoding object.""" super(PositionalEncoding, self).__init__() self.d_model = d_model self.xscale = math.sqrt(self.d_model) self.dropout = M.dropout.Dropout(dropout_rate) self.pe = mge.Tensor(0.0) self.extend_pe(
F.tensor.zeros([1, max_len])
megengine.functional.tensor.zeros
import math import megengine.module as M import megengine.functional as F import megengine as mge class PositionalEncoding(M.Module): """Positional encoding. :param int d_model: embedding dim :param float dropout_rate: dropout rate :param int max_len: maximum input length """ def __init__(self, d_model, dropout_rate, max_len=5000): """Construct an PositionalEncoding object.""" super(PositionalEncoding, self).__init__() self.d_model = d_model self.xscale = math.sqrt(self.d_model) self.dropout = M.dropout.Dropout(dropout_rate) self.pe = mge.Tensor(0.0) self.extend_pe(F.tensor.zeros([1, max_len])) def extend_pe(self, x): """Reset the positional encodings.""" if len(self.pe.shape): if self.pe.shape[1] >= x.shape[1]: if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return pe = F.tensor.zeros([x.shape[1], self.d_model]) position = mge.Tensor(
F.arange(0, x.shape[1], dtype="float32")
megengine.functional.arange
import math import megengine.module as M import megengine.functional as F import megengine as mge class PositionalEncoding(M.Module): """Positional encoding. :param int d_model: embedding dim :param float dropout_rate: dropout rate :param int max_len: maximum input length """ def __init__(self, d_model, dropout_rate, max_len=5000): """Construct an PositionalEncoding object.""" super(PositionalEncoding, self).__init__() self.d_model = d_model self.xscale = math.sqrt(self.d_model) self.dropout = M.dropout.Dropout(dropout_rate) self.pe = mge.Tensor(0.0) self.extend_pe(F.tensor.zeros([1, max_len])) def extend_pe(self, x): """Reset the positional encodings.""" if len(self.pe.shape): if self.pe.shape[1] >= x.shape[1]: if self.pe.dtype != x.dtype or self.pe.device != x.device: self.pe = self.pe.to(dtype=x.dtype, device=x.device) return pe = F.tensor.zeros([x.shape[1], self.d_model]) position = mge.Tensor(F.arange(0, x.shape[1], dtype="float32")).reshape( x.shape[1], -1 ) div_term = F.exp( mge.Tensor(
F.arange(0, self.d_model, 2, dtype="float32")
megengine.functional.arange
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import copy from typing import Any, Mapping import megengine as mge import megengine.functional as F import megengine.module as M from .modules import SE, activation, conv2d, gap2d, linear, norm2d __all__ = ["build_head", "ClsHead", "MBV3Head", "VGGHead"] def build_head( w_in: int, head_args: Mapping[str, Any] = None, norm_name: str = "BN", act_name: str = "relu" ) -> M.Module: """The factory function to build head. Note: if ``head_args`` is ``None`` or ``head_args["name"]`` is ``None``, this function will do nothing and return ``None``. Args: w_in: input width. head_args: head args. Default: ``None`` norm_name: default normalization function, will be overridden by the same key in ``head_args``. Default: ``"BN"`` act_name: default activation function, will be overridden by the same key in ``head_args``. Default: ``"relu"`` Returns: A head. """ if head_args is None: return None head_args = copy.deepcopy(head_args) head_name = head_args.pop("name", None) if head_name is None: return None head_args["w_in"] = w_in head_args.setdefault("norm_name", norm_name) head_args.setdefault("act_name", act_name) if callable(head_name): return head_name(**head_args) if isinstance(head_name, str): head_funcs = { "ClsHead": ClsHead, "MBV3Head": MBV3Head, "VGGHead": VGGHead, } if head_name in head_funcs: return head_funcs[head_name](**head_args) raise ValueError(f"Head '{head_name}' not supported") class ClsHead(M.Module): """Cls head: Conv, BN, Act, AvgPool, FC. Args: w_in: input width. w_out: output width, normally the number of classes. Default: ``1000`` width: width for first conv in head, conv will be omitted if set to 0. Default: ``0`` dropout_prob: dropout probability. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"relu"`` bias: whether fc has bias. Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 0, dropout_prob: float = 0.0, norm_name: str = "BN", act_name: str = "relu", bias: bool = True, ): super().__init__() self.width = width if self.width > 0: self.conv = conv2d(w_in, self.width, 1) self.bn = norm2d(norm_name, self.width) self.act = activation(act_name) w_in = self.width self.avg_pool = gap2d() if dropout_prob > 0.0: self.dropout = M.Dropout(dropout_prob) self.fc = linear(w_in, w_out, bias=bias) def forward(self, x: mge.Tensor) -> mge.Tensor: if self.width > 0: x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.avg_pool(x) x =
F.flatten(x, 1)
megengine.functional.flatten
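F.flatten(x, 1) collapses every axis from 1 onward, turning NCHW feature maps into the (N, features) matrix that the fully connected layer expects:

import numpy as np
import megengine.functional as F
from megengine import tensor

x = tensor(np.random.rand(4, 20, 4, 4).astype("float32"))
y = F.flatten(x, 1)  # keep axis 0, merge the remaining axes
print(y.shape)       # (4, 320)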
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import copy from typing import Any, Mapping import megengine as mge import megengine.functional as F import megengine.module as M from .modules import SE, activation, conv2d, gap2d, linear, norm2d __all__ = ["build_head", "ClsHead", "MBV3Head", "VGGHead"] def build_head( w_in: int, head_args: Mapping[str, Any] = None, norm_name: str = "BN", act_name: str = "relu" ) -> M.Module: """The factory function to build head. Note: if ``head_args`` is ``None`` or ``head_args["name"]`` is ``None``, this function will do nothing and return ``None``. Args: w_in: input width. head_args: head args. Default: ``None`` norm_name: default normalization function, will be overridden by the same key in ``head_args``. Default: ``"BN"`` act_name: default activation function, will be overridden by the same key in ``head_args``. Default: ``"relu"`` Returns: A head. """ if head_args is None: return None head_args = copy.deepcopy(head_args) head_name = head_args.pop("name", None) if head_name is None: return None head_args["w_in"] = w_in head_args.setdefault("norm_name", norm_name) head_args.setdefault("act_name", act_name) if callable(head_name): return head_name(**head_args) if isinstance(head_name, str): head_funcs = { "ClsHead": ClsHead, "MBV3Head": MBV3Head, "VGGHead": VGGHead, } if head_name in head_funcs: return head_funcs[head_name](**head_args) raise ValueError(f"Head '{head_name}' not supported") class ClsHead(M.Module): """Cls head: Conv, BN, Act, AvgPool, FC. Args: w_in: input width. w_out: output width, normally the number of classes. Default: ``1000`` width: width for first conv in head, conv will be omitted if set to 0. Default: ``0`` dropout_prob: dropout probability. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"relu"`` bias: whether fc has bias. Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 0, dropout_prob: float = 0.0, norm_name: str = "BN", act_name: str = "relu", bias: bool = True, ): super().__init__() self.width = width if self.width > 0: self.conv = conv2d(w_in, self.width, 1) self.bn = norm2d(norm_name, self.width) self.act = activation(act_name) w_in = self.width self.avg_pool = gap2d() if dropout_prob > 0.0: self.dropout = M.Dropout(dropout_prob) self.fc = linear(w_in, w_out, bias=bias) def forward(self, x: mge.Tensor) -> mge.Tensor: if self.width > 0: x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.avg_pool(x) x = F.flatten(x, 1) if getattr(self, "dropout", None) is not None: x = self.dropout(x) x = self.fc(x) return x class MBV3Head(M.Module): """MobileNet V3 head: Conv, BN, Act, AvgPool, SE, FC, Act, FC. Args: w_in: input width. w_out: output width, normally the number of classes. width: width for first conv in head. w_h: width for first linear in head. dropout_prob: dropout probability. Default: ``0.0`` se_r: Squeeze-and-Excitation (SE) ratio. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"hswish"`` bias: whether fc has bias. 
Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 960, w_h: int = 1280, dropout_prob: float = 0.0, se_r: float = 0.0, norm_name: str = "BN", act_name: str = "hswish", bias: bool = True, ): super().__init__() self.conv = conv2d(w_in, width, 1) self.bn = norm2d(norm_name, width) self.act = activation(act_name) self.avg_pool = gap2d() if se_r > 0.0: self.se = SE(width, int(se_r * width), act_name) self.h_fc = linear(width, w_h, bias=bias) self.h_act = activation(act_name) if dropout_prob > 0.0: self.dropout = M.Dropout(dropout_prob) self.fc = linear(w_h, w_out, bias=bias) def forward(self, x: mge.Tensor) -> mge.Tensor: x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.avg_pool(x) if getattr(self, "se", None) is not None: x = self.se(x) x =
F.flatten(x, 1)
megengine.functional.flatten
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import copy from typing import Any, Mapping import megengine as mge import megengine.functional as F import megengine.module as M from .modules import SE, activation, conv2d, gap2d, linear, norm2d __all__ = ["build_head", "ClsHead", "MBV3Head", "VGGHead"] def build_head( w_in: int, head_args: Mapping[str, Any] = None, norm_name: str = "BN", act_name: str = "relu" ) -> M.Module: """The factory function to build head. Note: if ``head_args`` is ``None`` or ``head_args["name"]`` is ``None``, this function will do nothing and return ``None``. Args: w_in: input width. head_args: head args. Default: ``None`` norm_name: default normalization function, will be overridden by the same key in ``head_args``. Default: ``"BN"`` act_name: default activation function, will be overridden by the same key in ``head_args``. Default: ``"relu"`` Returns: A head. """ if head_args is None: return None head_args = copy.deepcopy(head_args) head_name = head_args.pop("name", None) if head_name is None: return None head_args["w_in"] = w_in head_args.setdefault("norm_name", norm_name) head_args.setdefault("act_name", act_name) if callable(head_name): return head_name(**head_args) if isinstance(head_name, str): head_funcs = { "ClsHead": ClsHead, "MBV3Head": MBV3Head, "VGGHead": VGGHead, } if head_name in head_funcs: return head_funcs[head_name](**head_args) raise ValueError(f"Head '{head_name}' not supported") class ClsHead(M.Module): """Cls head: Conv, BN, Act, AvgPool, FC. Args: w_in: input width. w_out: output width, normally the number of classes. Default: ``1000`` width: width for first conv in head, conv will be omitted if set to 0. Default: ``0`` dropout_prob: dropout probability. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"relu"`` bias: whether fc has bias. Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 0, dropout_prob: float = 0.0, norm_name: str = "BN", act_name: str = "relu", bias: bool = True, ): super().__init__() self.width = width if self.width > 0: self.conv = conv2d(w_in, self.width, 1) self.bn = norm2d(norm_name, self.width) self.act = activation(act_name) w_in = self.width self.avg_pool = gap2d() if dropout_prob > 0.0: self.dropout = M.Dropout(dropout_prob) self.fc = linear(w_in, w_out, bias=bias) def forward(self, x: mge.Tensor) -> mge.Tensor: if self.width > 0: x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.avg_pool(x) x = F.flatten(x, 1) if getattr(self, "dropout", None) is not None: x = self.dropout(x) x = self.fc(x) return x class MBV3Head(M.Module): """MobileNet V3 head: Conv, BN, Act, AvgPool, SE, FC, Act, FC. Args: w_in: input width. w_out: output width, normally the number of classes. width: width for first conv in head. w_h: width for first linear in head. dropout_prob: dropout probability. Default: ``0.0`` se_r: Squeeze-and-Excitation (SE) ratio. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"hswish"`` bias: whether fc has bias. 
Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 960, w_h: int = 1280, dropout_prob: float = 0.0, se_r: float = 0.0, norm_name: str = "BN", act_name: str = "hswish", bias: bool = True, ): super().__init__() self.conv = conv2d(w_in, width, 1) self.bn = norm2d(norm_name, width) self.act = activation(act_name) self.avg_pool = gap2d() if se_r > 0.0: self.se = SE(width, int(se_r * width), act_name) self.h_fc = linear(width, w_h, bias=bias) self.h_act = activation(act_name) if dropout_prob > 0.0: self.dropout = M.Dropout(dropout_prob) self.fc = linear(w_h, w_out, bias=bias) def forward(self, x: mge.Tensor) -> mge.Tensor: x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.avg_pool(x) if getattr(self, "se", None) is not None: x = self.se(x) x = F.flatten(x, 1) x = self.h_fc(x) x = self.h_act(x) if getattr(self, "dropout", None) is not None: x = self.dropout(x) x = self.fc(x) return x class VGGHead(M.Module): """VGG head: AvgPool, [FC, Act, Dropout] x2, FC. Args: w_in: input width. w_out: output width, normally the number of classes. Default: ``1000`` width: width for linear in head. Default: ``4096`` dropout_prob: dropout probability. Default: ``0.5`` act_name: activation function. Default: ``"relu"`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 4096, dropout_prob: float = 0.5, act_name: str = "relu", **kwargs, ): super().__init__() self.avg_pool = gap2d(7) self.classifier = M.Sequential( linear(w_in * 7 * 7, width, bias=True), activation(act_name), M.Dropout(dropout_prob), linear(width, width, bias=True), activation(act_name), M.Dropout(dropout_prob), linear(width, w_out, bias=True), ) def forward(self, x: mge.Tensor) -> mge.Tensor: x = self.avg_pool(x) x =
F.flatten(x, 1)
megengine.functional.flatten
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import copy from typing import Any, Mapping import megengine as mge import megengine.functional as F import megengine.module as M from .modules import SE, activation, conv2d, gap2d, linear, norm2d __all__ = ["build_head", "ClsHead", "MBV3Head", "VGGHead"] def build_head( w_in: int, head_args: Mapping[str, Any] = None, norm_name: str = "BN", act_name: str = "relu" ) -> M.Module: """The factory function to build head. Note: if ``head_args`` is ``None`` or ``head_args["name"]`` is ``None``, this function will do nothing and return ``None``. Args: w_in: input width. head_args: head args. Default: ``None`` norm_name: default normalization function, will be overridden by the same key in ``head_args``. Default: ``"BN"`` act_name: default activation function, will be overridden by the same key in ``head_args``. Default: ``"relu"`` Returns: A head. """ if head_args is None: return None head_args = copy.deepcopy(head_args) head_name = head_args.pop("name", None) if head_name is None: return None head_args["w_in"] = w_in head_args.setdefault("norm_name", norm_name) head_args.setdefault("act_name", act_name) if callable(head_name): return head_name(**head_args) if isinstance(head_name, str): head_funcs = { "ClsHead": ClsHead, "MBV3Head": MBV3Head, "VGGHead": VGGHead, } if head_name in head_funcs: return head_funcs[head_name](**head_args) raise ValueError(f"Head '{head_name}' not supported") class ClsHead(M.Module): """Cls head: Conv, BN, Act, AvgPool, FC. Args: w_in: input width. w_out: output width, normally the number of classes. Default: ``1000`` width: width for first conv in head, conv will be omitted if set to 0. Default: ``0`` dropout_prob: dropout probability. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"relu"`` bias: whether fc has bias. Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 0, dropout_prob: float = 0.0, norm_name: str = "BN", act_name: str = "relu", bias: bool = True, ): super().__init__() self.width = width if self.width > 0: self.conv = conv2d(w_in, self.width, 1) self.bn = norm2d(norm_name, self.width) self.act = activation(act_name) w_in = self.width self.avg_pool = gap2d() if dropout_prob > 0.0: self.dropout =
M.Dropout(dropout_prob)
megengine.module.Dropout
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import copy from typing import Any, Mapping import megengine as mge import megengine.functional as F import megengine.module as M from .modules import SE, activation, conv2d, gap2d, linear, norm2d __all__ = ["build_head", "ClsHead", "MBV3Head", "VGGHead"] def build_head( w_in: int, head_args: Mapping[str, Any] = None, norm_name: str = "BN", act_name: str = "relu" ) -> M.Module: """The factory function to build head. Note: if ``head_args`` is ``None`` or ``head_args["name"]`` is ``None``, this function will do nothing and return ``None``. Args: w_in: input width. head_args: head args. Default: ``None`` norm_name: default normalization function, will be overridden by the same key in ``head_args``. Default: ``"BN"`` act_name: default activation function, will be overridden by the same key in ``head_args``. Default: ``"relu"`` Returns: A head. """ if head_args is None: return None head_args = copy.deepcopy(head_args) head_name = head_args.pop("name", None) if head_name is None: return None head_args["w_in"] = w_in head_args.setdefault("norm_name", norm_name) head_args.setdefault("act_name", act_name) if callable(head_name): return head_name(**head_args) if isinstance(head_name, str): head_funcs = { "ClsHead": ClsHead, "MBV3Head": MBV3Head, "VGGHead": VGGHead, } if head_name in head_funcs: return head_funcs[head_name](**head_args) raise ValueError(f"Head '{head_name}' not supported") class ClsHead(M.Module): """Cls head: Conv, BN, Act, AvgPool, FC. Args: w_in: input width. w_out: output width, normally the number of classes. Default: ``1000`` width: width for first conv in head, conv will be omitted if set to 0. Default: ``0`` dropout_prob: dropout probability. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"relu"`` bias: whether fc has bias. Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 0, dropout_prob: float = 0.0, norm_name: str = "BN", act_name: str = "relu", bias: bool = True, ): super().__init__() self.width = width if self.width > 0: self.conv = conv2d(w_in, self.width, 1) self.bn = norm2d(norm_name, self.width) self.act = activation(act_name) w_in = self.width self.avg_pool = gap2d() if dropout_prob > 0.0: self.dropout = M.Dropout(dropout_prob) self.fc = linear(w_in, w_out, bias=bias) def forward(self, x: mge.Tensor) -> mge.Tensor: if self.width > 0: x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.avg_pool(x) x = F.flatten(x, 1) if getattr(self, "dropout", None) is not None: x = self.dropout(x) x = self.fc(x) return x class MBV3Head(M.Module): """MobileNet V3 head: Conv, BN, Act, AvgPool, SE, FC, Act, FC. Args: w_in: input width. w_out: output width, normally the number of classes. width: width for first conv in head. w_h: width for first linear in head. dropout_prob: dropout probability. Default: ``0.0`` se_r: Squeeze-and-Excitation (SE) ratio. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"hswish"`` bias: whether fc has bias. 
Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 960, w_h: int = 1280, dropout_prob: float = 0.0, se_r: float = 0.0, norm_name: str = "BN", act_name: str = "hswish", bias: bool = True, ): super().__init__() self.conv = conv2d(w_in, width, 1) self.bn = norm2d(norm_name, width) self.act = activation(act_name) self.avg_pool = gap2d() if se_r > 0.0: self.se = SE(width, int(se_r * width), act_name) self.h_fc = linear(width, w_h, bias=bias) self.h_act = activation(act_name) if dropout_prob > 0.0: self.dropout =
M.Dropout(dropout_prob)
megengine.module.Dropout
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import copy from typing import Any, Mapping import megengine as mge import megengine.functional as F import megengine.module as M from .modules import SE, activation, conv2d, gap2d, linear, norm2d __all__ = ["build_head", "ClsHead", "MBV3Head", "VGGHead"] def build_head( w_in: int, head_args: Mapping[str, Any] = None, norm_name: str = "BN", act_name: str = "relu" ) -> M.Module: """The factory function to build head. Note: if ``head_args`` is ``None`` or ``head_args["name"]`` is ``None``, this function will do nothing and return ``None``. Args: w_in: input width. head_args: head args. Default: ``None`` norm_name: default normalization function, will be overridden by the same key in ``head_args``. Default: ``"BN"`` act_name: default activation function, will be overridden by the same key in ``head_args``. Default: ``"relu"`` Returns: A head. """ if head_args is None: return None head_args = copy.deepcopy(head_args) head_name = head_args.pop("name", None) if head_name is None: return None head_args["w_in"] = w_in head_args.setdefault("norm_name", norm_name) head_args.setdefault("act_name", act_name) if callable(head_name): return head_name(**head_args) if isinstance(head_name, str): head_funcs = { "ClsHead": ClsHead, "MBV3Head": MBV3Head, "VGGHead": VGGHead, } if head_name in head_funcs: return head_funcs[head_name](**head_args) raise ValueError(f"Head '{head_name}' not supported") class ClsHead(M.Module): """Cls head: Conv, BN, Act, AvgPool, FC. Args: w_in: input width. w_out: output width, normally the number of classes. Default: ``1000`` width: width for first conv in head, conv will be omitted if set to 0. Default: ``0`` dropout_prob: dropout probability. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"relu"`` bias: whether fc has bias. Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 0, dropout_prob: float = 0.0, norm_name: str = "BN", act_name: str = "relu", bias: bool = True, ): super().__init__() self.width = width if self.width > 0: self.conv = conv2d(w_in, self.width, 1) self.bn = norm2d(norm_name, self.width) self.act = activation(act_name) w_in = self.width self.avg_pool = gap2d() if dropout_prob > 0.0: self.dropout = M.Dropout(dropout_prob) self.fc = linear(w_in, w_out, bias=bias) def forward(self, x: mge.Tensor) -> mge.Tensor: if self.width > 0: x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.avg_pool(x) x = F.flatten(x, 1) if getattr(self, "dropout", None) is not None: x = self.dropout(x) x = self.fc(x) return x class MBV3Head(M.Module): """MobileNet V3 head: Conv, BN, Act, AvgPool, SE, FC, Act, FC. Args: w_in: input width. w_out: output width, normally the number of classes. width: width for first conv in head. w_h: width for first linear in head. dropout_prob: dropout probability. Default: ``0.0`` se_r: Squeeze-and-Excitation (SE) ratio. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"hswish"`` bias: whether fc has bias. 
Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 960, w_h: int = 1280, dropout_prob: float = 0.0, se_r: float = 0.0, norm_name: str = "BN", act_name: str = "hswish", bias: bool = True, ): super().__init__() self.conv = conv2d(w_in, width, 1) self.bn = norm2d(norm_name, width) self.act = activation(act_name) self.avg_pool = gap2d() if se_r > 0.0: self.se = SE(width, int(se_r * width), act_name) self.h_fc = linear(width, w_h, bias=bias) self.h_act = activation(act_name) if dropout_prob > 0.0: self.dropout = M.Dropout(dropout_prob) self.fc = linear(w_h, w_out, bias=bias) def forward(self, x: mge.Tensor) -> mge.Tensor: x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.avg_pool(x) if getattr(self, "se", None) is not None: x = self.se(x) x = F.flatten(x, 1) x = self.h_fc(x) x = self.h_act(x) if getattr(self, "dropout", None) is not None: x = self.dropout(x) x = self.fc(x) return x class VGGHead(M.Module): """VGG head: AvgPool, [FC, Act, Dropout] x2, FC. Args: w_in: input width. w_out: output width, normally the number of classes. Default: ``1000`` width: width for linear in head. Default: ``4096`` dropout_prob: dropout probability. Default: ``0.5`` act_name: activation function. Default: ``"relu"`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 4096, dropout_prob: float = 0.5, act_name: str = "relu", **kwargs, ): super().__init__() self.avg_pool = gap2d(7) self.classifier = M.Sequential( linear(w_in * 7 * 7, width, bias=True), activation(act_name),
M.Dropout(dropout_prob)
megengine.module.Dropout
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import copy from typing import Any, Mapping import megengine as mge import megengine.functional as F import megengine.module as M from .modules import SE, activation, conv2d, gap2d, linear, norm2d __all__ = ["build_head", "ClsHead", "MBV3Head", "VGGHead"] def build_head( w_in: int, head_args: Mapping[str, Any] = None, norm_name: str = "BN", act_name: str = "relu" ) -> M.Module: """The factory function to build head. Note: if ``head_args`` is ``None`` or ``head_args["name"]`` is ``None``, this function will do nothing and return ``None``. Args: w_in: input width. head_args: head args. Default: ``None`` norm_name: default normalization function, will be overridden by the same key in ``head_args``. Default: ``"BN"`` act_name: default activation function, will be overridden by the same key in ``head_args``. Default: ``"relu"`` Returns: A head. """ if head_args is None: return None head_args = copy.deepcopy(head_args) head_name = head_args.pop("name", None) if head_name is None: return None head_args["w_in"] = w_in head_args.setdefault("norm_name", norm_name) head_args.setdefault("act_name", act_name) if callable(head_name): return head_name(**head_args) if isinstance(head_name, str): head_funcs = { "ClsHead": ClsHead, "MBV3Head": MBV3Head, "VGGHead": VGGHead, } if head_name in head_funcs: return head_funcs[head_name](**head_args) raise ValueError(f"Head '{head_name}' not supported") class ClsHead(M.Module): """Cls head: Conv, BN, Act, AvgPool, FC. Args: w_in: input width. w_out: output width, normally the number of classes. Default: ``1000`` width: width for first conv in head, conv will be omitted if set to 0. Default: ``0`` dropout_prob: dropout probability. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"relu"`` bias: whether fc has bias. Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 0, dropout_prob: float = 0.0, norm_name: str = "BN", act_name: str = "relu", bias: bool = True, ): super().__init__() self.width = width if self.width > 0: self.conv = conv2d(w_in, self.width, 1) self.bn = norm2d(norm_name, self.width) self.act = activation(act_name) w_in = self.width self.avg_pool = gap2d() if dropout_prob > 0.0: self.dropout = M.Dropout(dropout_prob) self.fc = linear(w_in, w_out, bias=bias) def forward(self, x: mge.Tensor) -> mge.Tensor: if self.width > 0: x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.avg_pool(x) x = F.flatten(x, 1) if getattr(self, "dropout", None) is not None: x = self.dropout(x) x = self.fc(x) return x class MBV3Head(M.Module): """MobileNet V3 head: Conv, BN, Act, AvgPool, SE, FC, Act, FC. Args: w_in: input width. w_out: output width, normally the number of classes. width: width for first conv in head. w_h: width for first linear in head. dropout_prob: dropout probability. Default: ``0.0`` se_r: Squeeze-and-Excitation (SE) ratio. Default: ``0.0`` norm_name: normalization function. Default: ``"BN"`` act_name: activation function. Default: ``"hswish"`` bias: whether fc has bias. 
Default: ``True`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 960, w_h: int = 1280, dropout_prob: float = 0.0, se_r: float = 0.0, norm_name: str = "BN", act_name: str = "hswish", bias: bool = True, ): super().__init__() self.conv = conv2d(w_in, width, 1) self.bn = norm2d(norm_name, width) self.act = activation(act_name) self.avg_pool = gap2d() if se_r > 0.0: self.se = SE(width, int(se_r * width), act_name) self.h_fc = linear(width, w_h, bias=bias) self.h_act = activation(act_name) if dropout_prob > 0.0: self.dropout = M.Dropout(dropout_prob) self.fc = linear(w_h, w_out, bias=bias) def forward(self, x: mge.Tensor) -> mge.Tensor: x = self.conv(x) x = self.bn(x) x = self.act(x) x = self.avg_pool(x) if getattr(self, "se", None) is not None: x = self.se(x) x = F.flatten(x, 1) x = self.h_fc(x) x = self.h_act(x) if getattr(self, "dropout", None) is not None: x = self.dropout(x) x = self.fc(x) return x class VGGHead(M.Module): """VGG head: AvgPool, [FC, Act, Dropout] x2, FC. Args: w_in: input width. w_out: output width, normally the number of classes. Default: ``1000`` width: width for linear in head. Default: ``4096`` dropout_prob: dropout probability. Default: ``0.5`` act_name: activation function. Default: ``"relu"`` """ def __init__( self, w_in: int, w_out: int = 1000, width: int = 4096, dropout_prob: float = 0.5, act_name: str = "relu", **kwargs, ): super().__init__() self.avg_pool = gap2d(7) self.classifier = M.Sequential( linear(w_in * 7 * 7, width, bias=True), activation(act_name), M.Dropout(dropout_prob), linear(width, width, bias=True), activation(act_name),
M.Dropout(dropout_prob)
megengine.module.Dropout
import numpy as np from megengine import Tensor import megengine.functional as F import pdb class AnchorGenerator(): """default anchor generator for fpn. This class generate anchors by feature map in level. """ def __init__(self, base_size=16, ratios=[0.5, 1, 2], base_scale=2): self.base_size = base_size self.base_scale = np.array(base_scale) self.anchor_ratios = ratios def _whctrs(self, anchor): """convert anchor box into (w, h, ctr_x, ctr_y) """ w = anchor[:, 2] - anchor[:, 0] + 1 h = anchor[:, 3] - anchor[:, 1] + 1 x_ctr = anchor[:, 0] + 0.5 * (w - 1) y_ctr = anchor[:, 1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def get_plane_anchors(self, anchor_scales: np.ndarray): """get anchors per location on feature map. The anchor number is anchor_scales x anchor_ratios """ base_anchor =
Tensor([0, 0, self.base_size - 1, self.base_size - 1])
megengine.Tensor
import numpy as np from megengine import Tensor import megengine.functional as F import pdb class AnchorGenerator(): """default anchor generator for fpn. This class generate anchors by feature map in level. """ def __init__(self, base_size=16, ratios=[0.5, 1, 2], base_scale=2): self.base_size = base_size self.base_scale = np.array(base_scale) self.anchor_ratios = ratios def _whctrs(self, anchor): """convert anchor box into (w, h, ctr_x, ctr_y) """ w = anchor[:, 2] - anchor[:, 0] + 1 h = anchor[:, 3] - anchor[:, 1] + 1 x_ctr = anchor[:, 0] + 0.5 * (w - 1) y_ctr = anchor[:, 1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def get_plane_anchors(self, anchor_scales: np.ndarray): """get anchors per location on feature map. The anchor number is anchor_scales x anchor_ratios """ base_anchor = Tensor([0, 0, self.base_size - 1, self.base_size - 1]) base_anchor = base_anchor.reshape(1, -1) w, h, x_ctr, y_ctr = self._whctrs(base_anchor) # ratio enumerate size = w * h size_ratios = size / self.anchor_ratios #pdb.set_trace() ws =
F.sqrt(size_ratios)
megengine.functional.sqrt
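The sqrt above implements the usual anchor algebra: for a target area size and aspect ratio r, width sqrt(size / r) and height width * r satisfy h / w == r while keeping w * h == size. A standalone check:

import numpy as np
import megengine.functional as F
from megengine import Tensor

size = Tensor(16.0 * 16.0)  # base anchor area
ratios = Tensor(np.array([0.5, 1.0, 2.0], dtype="float32"))
ws = F.sqrt(size / ratios)
hs = ws * ratios
print((ws * hs).numpy())  # ~[256, 256, 256]: the area is preserved per ratio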
import numpy as np from megengine import Tensor import megengine.functional as F import pdb class AnchorGenerator(): """default anchor generator for fpn. This class generate anchors by feature map in level. """ def __init__(self, base_size=16, ratios=[0.5, 1, 2], base_scale=2): self.base_size = base_size self.base_scale = np.array(base_scale) self.anchor_ratios = ratios def _whctrs(self, anchor): """convert anchor box into (w, h, ctr_x, ctr_y) """ w = anchor[:, 2] - anchor[:, 0] + 1 h = anchor[:, 3] - anchor[:, 1] + 1 x_ctr = anchor[:, 0] + 0.5 * (w - 1) y_ctr = anchor[:, 1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def get_plane_anchors(self, anchor_scales: np.ndarray): """get anchors per location on feature map. The anchor number is anchor_scales x anchor_ratios """ base_anchor = Tensor([0, 0, self.base_size - 1, self.base_size - 1]) base_anchor = base_anchor.reshape(1, -1) w, h, x_ctr, y_ctr = self._whctrs(base_anchor) # ratio enumerate size = w * h size_ratios = size / self.anchor_ratios #pdb.set_trace() ws = F.sqrt(size_ratios) hs = ws * self.anchor_ratios # ws = size_ratios.sqrt().round() # hs = (ws * self.anchor_ratios).round() # scale enumerate anchor_scales = anchor_scales.reshape(1, -1).astype(np.float32) ws =
F.expand_dims(ws, 1)
megengine.functional.expand_dims
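F.expand_dims inserts a length-1 axis, which is how the code above lines vectors up for broadcasting:

import numpy as np
import megengine.functional as F
from megengine import tensor

v = tensor(np.array([1.0, 2.0, 3.0], dtype="float32"))  # shape (3,)
col = F.expand_dims(v, 1)  # shape (3, 1)
row = F.expand_dims(v, 0)  # shape (1, 3)
print((col * row).shape)   # broadcasts to (3, 3)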
import numpy as np from megengine import Tensor import megengine.functional as F import pdb class AnchorGenerator(): """default anchor generator for fpn. This class generate anchors by feature map in level. """ def __init__(self, base_size=16, ratios=[0.5, 1, 2], base_scale=2): self.base_size = base_size self.base_scale = np.array(base_scale) self.anchor_ratios = ratios def _whctrs(self, anchor): """convert anchor box into (w, h, ctr_x, ctr_y) """ w = anchor[:, 2] - anchor[:, 0] + 1 h = anchor[:, 3] - anchor[:, 1] + 1 x_ctr = anchor[:, 0] + 0.5 * (w - 1) y_ctr = anchor[:, 1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def get_plane_anchors(self, anchor_scales: np.ndarray): """get anchors per location on feature map. The anchor number is anchor_scales x anchor_ratios """ base_anchor = Tensor([0, 0, self.base_size - 1, self.base_size - 1]) base_anchor = base_anchor.reshape(1, -1) w, h, x_ctr, y_ctr = self._whctrs(base_anchor) # ratio enumerate size = w * h size_ratios = size / self.anchor_ratios #pdb.set_trace() ws = F.sqrt(size_ratios) hs = ws * self.anchor_ratios # ws = size_ratios.sqrt().round() # hs = (ws * self.anchor_ratios).round() # scale enumerate anchor_scales = anchor_scales.reshape(1, -1).astype(np.float32) ws = F.expand_dims(ws, 1) hs =
F.expand_dims(hs, 1)
megengine.functional.expand_dims
import numpy as np from megengine import Tensor import megengine.functional as F import pdb class AnchorGenerator(): """default anchor generator for fpn. This class generate anchors by feature map in level. """ def __init__(self, base_size=16, ratios=[0.5, 1, 2], base_scale=2): self.base_size = base_size self.base_scale = np.array(base_scale) self.anchor_ratios = ratios def _whctrs(self, anchor): """convert anchor box into (w, h, ctr_x, ctr_y) """ w = anchor[:, 2] - anchor[:, 0] + 1 h = anchor[:, 3] - anchor[:, 1] + 1 x_ctr = anchor[:, 0] + 0.5 * (w - 1) y_ctr = anchor[:, 1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def get_plane_anchors(self, anchor_scales: np.ndarray): """get anchors per location on feature map. The anchor number is anchor_scales x anchor_ratios """ base_anchor = Tensor([0, 0, self.base_size - 1, self.base_size - 1]) base_anchor = base_anchor.reshape(1, -1) w, h, x_ctr, y_ctr = self._whctrs(base_anchor) # ratio enumerate size = w * h size_ratios = size / self.anchor_ratios #pdb.set_trace() ws = F.sqrt(size_ratios) hs = ws * self.anchor_ratios # ws = size_ratios.sqrt().round() # hs = (ws * self.anchor_ratios).round() # scale enumerate anchor_scales = anchor_scales.reshape(1, -1).astype(np.float32) ws = F.expand_dims(ws, 1) hs = F.expand_dims(hs, 1) ws = (ws * anchor_scales).reshape(-1, 1) hs = (hs * anchor_scales).reshape(-1, 1) # make anchors anchors = F.concat( [ x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1), ], axis=1, ) return anchors.astype(np.float32) def get_center_offsets(self, featmap, stride): # f_shp = featmap.shape # fm_height, fm_width = f_shp[-2], f_shp[-1] fm_height, fm_width = featmap.shape[2:] shift_x = F.linspace(0, fm_width - 1, fm_width) * stride shift_y = F.linspace(0, fm_height - 1, fm_height) * stride # make the mesh grid of shift_x and shift_y mesh_shape = (fm_height, fm_width) broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), mesh_shape) broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), mesh_shape) # broad_shift_x = shift_x.reshape(-1, shift_x.shape[0]).broadcast_to(*mesh_shape) # broad_shift_y = shift_y.reshape(shift_y.shape[0], -1).broadcast_to(*mesh_shape) flatten_shift_x = broad_shift_x.flatten() flatten_shift_y = broad_shift_y.flatten() shifts =
F.stack([flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y], axis=1)
megengine.functional.stack
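Unlike F.concat, which joins tensors along an existing axis, F.stack introduces a new one; stacking four (A,) vectors on axis 1 yields the (A, 4) xyxy shift table used above:

import numpy as np
import megengine.functional as F
from megengine import tensor

x = tensor(np.array([0.0, 16.0, 32.0], dtype="float32"))
y = tensor(np.array([0.0, 16.0, 32.0], dtype="float32"))
shifts = F.stack([x, y, x, y], axis=1)
print(shifts.shape)  # (3, 4)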
import numpy as np from megengine import Tensor import megengine.functional as F import pdb class AnchorGenerator(): """default anchor generator for fpn. This class generate anchors by feature map in level. """ def __init__(self, base_size=16, ratios=[0.5, 1, 2], base_scale=2): self.base_size = base_size self.base_scale = np.array(base_scale) self.anchor_ratios = ratios def _whctrs(self, anchor): """convert anchor box into (w, h, ctr_x, ctr_y) """ w = anchor[:, 2] - anchor[:, 0] + 1 h = anchor[:, 3] - anchor[:, 1] + 1 x_ctr = anchor[:, 0] + 0.5 * (w - 1) y_ctr = anchor[:, 1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def get_plane_anchors(self, anchor_scales: np.ndarray): """get anchors per location on feature map. The anchor number is anchor_scales x anchor_ratios """ base_anchor = Tensor([0, 0, self.base_size - 1, self.base_size - 1]) base_anchor = base_anchor.reshape(1, -1) w, h, x_ctr, y_ctr = self._whctrs(base_anchor) # ratio enumerate size = w * h size_ratios = size / self.anchor_ratios #pdb.set_trace() ws = F.sqrt(size_ratios) hs = ws * self.anchor_ratios # ws = size_ratios.sqrt().round() # hs = (ws * self.anchor_ratios).round() # scale enumerate anchor_scales = anchor_scales.reshape(1, -1).astype(np.float32) ws = F.expand_dims(ws, 1) hs = F.expand_dims(hs, 1) ws = (ws * anchor_scales).reshape(-1, 1) hs = (hs * anchor_scales).reshape(-1, 1) # make anchors anchors = F.concat( [ x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1), ], axis=1, ) return anchors.astype(np.float32) def get_center_offsets(self, featmap, stride): # f_shp = featmap.shape # fm_height, fm_width = f_shp[-2], f_shp[-1] fm_height, fm_width = featmap.shape[2:] shift_x =
F.linspace(0, fm_width - 1, fm_width)
megengine.functional.linspace
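F.linspace(0, n - 1, n) enumerates the n cell indices, and multiplying by the stride maps them back to input-image coordinates; the reshape-plus-broadcast_to pair in the surrounding row then replays NumPy's meshgrid trick:

import megengine.functional as F

fm_h, fm_w, stride = 2, 3, 16
shift_x = F.linspace(0, fm_w - 1, fm_w) * stride  # [0, 16, 32]
shift_y = F.linspace(0, fm_h - 1, fm_h) * stride  # [0, 16]
bx = F.broadcast_to(shift_x.reshape(1, -1), (fm_h, fm_w))  # x varies per column
by = F.broadcast_to(shift_y.reshape(-1, 1), (fm_h, fm_w))  # y varies per row
print(bx.numpy())
print(by.numpy())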
import numpy as np from megengine import Tensor import megengine.functional as F import pdb class AnchorGenerator(): """default anchor generator for fpn. This class generate anchors by feature map in level. """ def __init__(self, base_size=16, ratios=[0.5, 1, 2], base_scale=2): self.base_size = base_size self.base_scale = np.array(base_scale) self.anchor_ratios = ratios def _whctrs(self, anchor): """convert anchor box into (w, h, ctr_x, ctr_y) """ w = anchor[:, 2] - anchor[:, 0] + 1 h = anchor[:, 3] - anchor[:, 1] + 1 x_ctr = anchor[:, 0] + 0.5 * (w - 1) y_ctr = anchor[:, 1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def get_plane_anchors(self, anchor_scales: np.ndarray): """get anchors per location on feature map. The anchor number is anchor_scales x anchor_ratios """ base_anchor = Tensor([0, 0, self.base_size - 1, self.base_size - 1]) base_anchor = base_anchor.reshape(1, -1) w, h, x_ctr, y_ctr = self._whctrs(base_anchor) # ratio enumerate size = w * h size_ratios = size / self.anchor_ratios #pdb.set_trace() ws = F.sqrt(size_ratios) hs = ws * self.anchor_ratios # ws = size_ratios.sqrt().round() # hs = (ws * self.anchor_ratios).round() # scale enumerate anchor_scales = anchor_scales.reshape(1, -1).astype(np.float32) ws = F.expand_dims(ws, 1) hs = F.expand_dims(hs, 1) ws = (ws * anchor_scales).reshape(-1, 1) hs = (hs * anchor_scales).reshape(-1, 1) # make anchors anchors = F.concat( [ x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1), ], axis=1, ) return anchors.astype(np.float32) def get_center_offsets(self, featmap, stride): # f_shp = featmap.shape # fm_height, fm_width = f_shp[-2], f_shp[-1] fm_height, fm_width = featmap.shape[2:] shift_x = F.linspace(0, fm_width - 1, fm_width) * stride shift_y =
F.linspace(0, fm_height - 1, fm_height)
megengine.functional.linspace
import numpy as np from megengine import Tensor import megengine.functional as F import pdb class AnchorGenerator(): """default anchor generator for fpn. This class generate anchors by feature map in level. """ def __init__(self, base_size=16, ratios=[0.5, 1, 2], base_scale=2): self.base_size = base_size self.base_scale = np.array(base_scale) self.anchor_ratios = ratios def _whctrs(self, anchor): """convert anchor box into (w, h, ctr_x, ctr_y) """ w = anchor[:, 2] - anchor[:, 0] + 1 h = anchor[:, 3] - anchor[:, 1] + 1 x_ctr = anchor[:, 0] + 0.5 * (w - 1) y_ctr = anchor[:, 1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def get_plane_anchors(self, anchor_scales: np.ndarray): """get anchors per location on feature map. The anchor number is anchor_scales x anchor_ratios """ base_anchor = Tensor([0, 0, self.base_size - 1, self.base_size - 1]) base_anchor = base_anchor.reshape(1, -1) w, h, x_ctr, y_ctr = self._whctrs(base_anchor) # ratio enumerate size = w * h size_ratios = size / self.anchor_ratios #pdb.set_trace() ws = F.sqrt(size_ratios) hs = ws * self.anchor_ratios # ws = size_ratios.sqrt().round() # hs = (ws * self.anchor_ratios).round() # scale enumerate anchor_scales = anchor_scales.reshape(1, -1).astype(np.float32) ws = F.expand_dims(ws, 1) hs = F.expand_dims(hs, 1) ws = (ws * anchor_scales).reshape(-1, 1) hs = (hs * anchor_scales).reshape(-1, 1) # make anchors anchors = F.concat( [ x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1), ], axis=1, ) return anchors.astype(np.float32) def get_center_offsets(self, featmap, stride): # f_shp = featmap.shape # fm_height, fm_width = f_shp[-2], f_shp[-1] fm_height, fm_width = featmap.shape[2:] shift_x = F.linspace(0, fm_width - 1, fm_width) * stride shift_y = F.linspace(0, fm_height - 1, fm_height) * stride # make the mesh grid of shift_x and shift_y mesh_shape = (fm_height, fm_width) broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), mesh_shape) broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), mesh_shape) # broad_shift_x = shift_x.reshape(-1, shift_x.shape[0]).broadcast_to(*mesh_shape) # broad_shift_y = shift_y.reshape(shift_y.shape[0], -1).broadcast_to(*mesh_shape) flatten_shift_x = broad_shift_x.flatten() flatten_shift_y = broad_shift_y.flatten() shifts = F.stack([flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y], axis=1) # flatten_shift_x = F.add_axis(broad_shift_x.reshape(-1), 1) # flatten_shift_y = F.add_axis(broad_shift_y.reshape(-1), 1) # shifts = F.concat( # [flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y,], # axis=1) return shifts def get_anchors_by_feature(self, featmap, stride): # shifts shape: [A, 4] shifts = self.get_center_offsets(featmap, stride) # plane_anchors shape: [B, 4], e.g. B=3 plane_anchors = self.get_plane_anchors(self.base_scale * stride) # all_anchors = shifts.repeat(1,3) + cell_anchors.flatten() all_anchors =
F.expand_dims(plane_anchors, 0)
megengine.functional.expand_dims
import numpy as np from megengine import Tensor import megengine.functional as F import pdb class AnchorGenerator(): """default anchor generator for fpn. This class generate anchors by feature map in level. """ def __init__(self, base_size=16, ratios=[0.5, 1, 2], base_scale=2): self.base_size = base_size self.base_scale = np.array(base_scale) self.anchor_ratios = ratios def _whctrs(self, anchor): """convert anchor box into (w, h, ctr_x, ctr_y) """ w = anchor[:, 2] - anchor[:, 0] + 1 h = anchor[:, 3] - anchor[:, 1] + 1 x_ctr = anchor[:, 0] + 0.5 * (w - 1) y_ctr = anchor[:, 1] + 0.5 * (h - 1) return w, h, x_ctr, y_ctr def get_plane_anchors(self, anchor_scales: np.ndarray): """get anchors per location on feature map. The anchor number is anchor_scales x anchor_ratios """ base_anchor = Tensor([0, 0, self.base_size - 1, self.base_size - 1]) base_anchor = base_anchor.reshape(1, -1) w, h, x_ctr, y_ctr = self._whctrs(base_anchor) # ratio enumerate size = w * h size_ratios = size / self.anchor_ratios #pdb.set_trace() ws = F.sqrt(size_ratios) hs = ws * self.anchor_ratios # ws = size_ratios.sqrt().round() # hs = (ws * self.anchor_ratios).round() # scale enumerate anchor_scales = anchor_scales.reshape(1, -1).astype(np.float32) ws = F.expand_dims(ws, 1) hs = F.expand_dims(hs, 1) ws = (ws * anchor_scales).reshape(-1, 1) hs = (hs * anchor_scales).reshape(-1, 1) # make anchors anchors = F.concat( [ x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1), ], axis=1, ) return anchors.astype(np.float32) def get_center_offsets(self, featmap, stride): # f_shp = featmap.shape # fm_height, fm_width = f_shp[-2], f_shp[-1] fm_height, fm_width = featmap.shape[2:] shift_x = F.linspace(0, fm_width - 1, fm_width) * stride shift_y = F.linspace(0, fm_height - 1, fm_height) * stride # make the mesh grid of shift_x and shift_y mesh_shape = (fm_height, fm_width) broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), mesh_shape) broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), mesh_shape) # broad_shift_x = shift_x.reshape(-1, shift_x.shape[0]).broadcast_to(*mesh_shape) # broad_shift_y = shift_y.reshape(shift_y.shape[0], -1).broadcast_to(*mesh_shape) flatten_shift_x = broad_shift_x.flatten() flatten_shift_y = broad_shift_y.flatten() shifts = F.stack([flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y], axis=1) # flatten_shift_x = F.add_axis(broad_shift_x.reshape(-1), 1) # flatten_shift_y = F.add_axis(broad_shift_y.reshape(-1), 1) # shifts = F.concat( # [flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y,], # axis=1) return shifts def get_anchors_by_feature(self, featmap, stride): # shifts shape: [A, 4] shifts = self.get_center_offsets(featmap, stride) # plane_anchors shape: [B, 4], e.g. B=3 plane_anchors = self.get_plane_anchors(self.base_scale * stride) # all_anchors = shifts.repeat(1,3) + cell_anchors.flatten() all_anchors = F.expand_dims(plane_anchors, 0) +
F.expand_dims(shifts, 1)
megengine.functional.expand_dims
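The two expand_dims calls set up a broadcasted add: (1, B, 4) + (A, 1, 4) produces (A, B, 4), i.e. every per-location anchor shifted to every feature-map cell. A shape-only sketch with dummy values:

import numpy as np
import megengine.functional as F
from megengine import tensor

shifts = tensor(np.zeros((6, 4), dtype="float32"))        # A = 6 locations
plane_anchors = tensor(np.ones((3, 4), dtype="float32"))  # B = 3 anchors per cell
all_anchors = F.expand_dims(plane_anchors, 0) + F.expand_dims(shifts, 1)
print(all_anchors.shape)  # (6, 3, 4)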
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import argparse import json import os from tqdm import tqdm import megengine as mge import megengine.distributed as dist from megengine.data import DataLoader from official.vision.detection.tools.utils import ( DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file ) logger =
mge.get_logger(__name__)
megengine.get_logger
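mge.get_logger hands back a standard-library logging.Logger preconfigured by MegEngine, so the usual logging API applies:

import megengine as mge

logger = mge.get_logger(__name__)
logger.setLevel("INFO")
logger.info("loaded %d results", 42)  # illustrative message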
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import argparse import json import os from tqdm import tqdm import megengine as mge import megengine.distributed as dist from megengine.data import DataLoader from official.vision.detection.tools.utils import ( DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file ) logger = mge.get_logger(__name__) logger.setLevel("INFO") def make_parser(): parser = argparse.ArgumentParser() parser.add_argument( "-f", "--file", default="net.py", type=str, help="net description file" ) parser.add_argument( "-w", "--weight_file", default=None, type=str, help="weights file", ) parser.add_argument( "-n", "--devices", default=1, type=int, help="total number of gpus for testing", ) parser.add_argument( "-d", "--dataset_dir", default="/data/datasets", type=str, ) parser.add_argument("-se", "--start_epoch", default=-1, type=int) parser.add_argument("-ee", "--end_epoch", default=-1, type=int) return parser def main(): # pylint: disable=import-outside-toplevel,too-many-branches,too-many-statements from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval parser = make_parser() args = parser.parse_args() current_network = import_from_file(args.file) cfg = current_network.Cfg() if args.weight_file: args.start_epoch = args.end_epoch = -1 else: if args.start_epoch == -1: args.start_epoch = cfg.max_epoch - 1 if args.end_epoch == -1: args.end_epoch = args.start_epoch assert 0 <= args.start_epoch <= args.end_epoch < cfg.max_epoch for epoch_num in range(args.start_epoch, args.end_epoch + 1): if args.weight_file: weight_file = args.weight_file else: weight_file = "log-of-{}/epoch_{}.pkl".format( os.path.basename(args.file).split(".")[0], epoch_num ) if args.devices > 1: dist_worker = dist.launcher(n_gpus=args.devices)(worker) result_list = dist_worker(current_network, weight_file, args.dataset_dir) result_list = sum(result_list, []) else: result_list = worker(current_network, weight_file, args.dataset_dir) all_results = DetEvaluator.format(result_list, cfg) if args.weight_file: json_path = "{}_{}.json".format( os.path.basename(args.file).split(".")[0], os.path.basename(args.weight_file).split(".")[0], ) else: json_path = "log-of-{}/epoch_{}.json".format( os.path.basename(args.file).split(".")[0], epoch_num ) all_results = json.dumps(all_results) with open(json_path, "w") as fo: fo.write(all_results) logger.info("Save results to %s, start evaluation!", json_path) eval_gt = COCO( os.path.join( args.dataset_dir, cfg.test_dataset["name"], cfg.test_dataset["ann_file"] ) ) eval_dt = eval_gt.loadRes(json_path) cocoEval = COCOeval(eval_gt, eval_dt, iouType="bbox") cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() metrics = [ "AP", "[email protected]", "[email protected]", "APs", "APm", "APl", "AR@1", "AR@10", "AR@100", "ARs", "ARm", "ARl", ] logger.info("mmAP".center(32, "-")) for i, m in enumerate(metrics): logger.info("|\t%s\t|\t%.03f\t|", m, cocoEval.stats[i]) logger.info("-" * 32) def worker(current_network, weight_file, dataset_dir): cfg = current_network.Cfg() cfg.backbone_pretrained = False model = current_network.Net(cfg) model.eval() state_dict =
mge.load(weight_file)
megengine.load
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import argparse import json import os from tqdm import tqdm import megengine as mge import megengine.distributed as dist from megengine.data import DataLoader from official.vision.detection.tools.utils import ( DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file ) logger = mge.get_logger(__name__) logger.setLevel("INFO") def make_parser(): parser = argparse.ArgumentParser() parser.add_argument( "-f", "--file", default="net.py", type=str, help="net description file" ) parser.add_argument( "-w", "--weight_file", default=None, type=str, help="weights file", ) parser.add_argument( "-n", "--devices", default=1, type=int, help="total number of gpus for testing", ) parser.add_argument( "-d", "--dataset_dir", default="/data/datasets", type=str, ) parser.add_argument("-se", "--start_epoch", default=-1, type=int) parser.add_argument("-ee", "--end_epoch", default=-1, type=int) return parser def main(): # pylint: disable=import-outside-toplevel,too-many-branches,too-many-statements from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval parser = make_parser() args = parser.parse_args() current_network = import_from_file(args.file) cfg = current_network.Cfg() if args.weight_file: args.start_epoch = args.end_epoch = -1 else: if args.start_epoch == -1: args.start_epoch = cfg.max_epoch - 1 if args.end_epoch == -1: args.end_epoch = args.start_epoch assert 0 <= args.start_epoch <= args.end_epoch < cfg.max_epoch for epoch_num in range(args.start_epoch, args.end_epoch + 1): if args.weight_file: weight_file = args.weight_file else: weight_file = "log-of-{}/epoch_{}.pkl".format( os.path.basename(args.file).split(".")[0], epoch_num ) if args.devices > 1: dist_worker = dist.launcher(n_gpus=args.devices)(worker) result_list = dist_worker(current_network, weight_file, args.dataset_dir) result_list = sum(result_list, []) else: result_list = worker(current_network, weight_file, args.dataset_dir) all_results = DetEvaluator.format(result_list, cfg) if args.weight_file: json_path = "{}_{}.json".format( os.path.basename(args.file).split(".")[0], os.path.basename(args.weight_file).split(".")[0], ) else: json_path = "log-of-{}/epoch_{}.json".format( os.path.basename(args.file).split(".")[0], epoch_num ) all_results = json.dumps(all_results) with open(json_path, "w") as fo: fo.write(all_results) logger.info("Save results to %s, start evaluation!", json_path) eval_gt = COCO( os.path.join( args.dataset_dir, cfg.test_dataset["name"], cfg.test_dataset["ann_file"] ) ) eval_dt = eval_gt.loadRes(json_path) cocoEval = COCOeval(eval_gt, eval_dt, iouType="bbox") cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() metrics = [ "AP", "AP@0.5", "AP@0.75", "APs", "APm", "APl", "AR@1", "AR@10", "AR@100", "ARs", "ARm", "ARl", ] logger.info("mmAP".center(32, "-")) for i, m in enumerate(metrics): logger.info("|\t%s\t|\t%.03f\t|", m, cocoEval.stats[i]) logger.info("-" * 32) def worker(current_network, weight_file, dataset_dir): cfg = current_network.Cfg() cfg.backbone_pretrained = False model = current_network.Net(cfg) model.eval() state_dict = mge.load(weight_file) if "state_dict" in state_dict:
state_dict = state_dict["state_dict"] model.load_state_dict(state_dict) evaluator = DetEvaluator(model) test_loader = build_dataloader(dataset_dir, model.cfg) if dist.get_rank() == 0: test_loader = tqdm(test_loader) result_list = [] for data in test_loader: image, im_info = DetEvaluator.process_inputs( data[0][0], model.cfg.test_image_short_size, model.cfg.test_image_max_size, ) pred_res = evaluator.predict( image=mge.tensor(image), im_info=mge.tensor(im_info) ) result = { "pred_boxes": pred_res, "image_id": int(data[1][2][0].split(".")[0].split("_")[-1]), } result_list.append(result) return result_list # pylint: disable=unused-argument def build_dataloader(dataset_dir, cfg): val_dataset = PseudoDetectionDataset(length=5000, order=["image", "info"]) val_sampler = InferenceSampler(val_dataset, 1) val_dataloader =
DataLoader(val_dataset, sampler=val_sampler, num_workers=2)
megengine.data.DataLoader
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import argparse import json import os from tqdm import tqdm import megengine as mge import megengine.distributed as dist from megengine.data import DataLoader from official.vision.detection.tools.utils import ( DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file ) logger = mge.get_logger(__name__) logger.setLevel("INFO") def make_parser(): parser = argparse.ArgumentParser() parser.add_argument( "-f", "--file", default="net.py", type=str, help="net description file" ) parser.add_argument( "-w", "--weight_file", default=None, type=str, help="weights file", ) parser.add_argument( "-n", "--devices", default=1, type=int, help="total number of gpus for testing", ) parser.add_argument( "-d", "--dataset_dir", default="/data/datasets", type=str, ) parser.add_argument("-se", "--start_epoch", default=-1, type=int) parser.add_argument("-ee", "--end_epoch", default=-1, type=int) return parser def main(): # pylint: disable=import-outside-toplevel,too-many-branches,too-many-statements from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval parser = make_parser() args = parser.parse_args() current_network = import_from_file(args.file) cfg = current_network.Cfg() if args.weight_file: args.start_epoch = args.end_epoch = -1 else: if args.start_epoch == -1: args.start_epoch = cfg.max_epoch - 1 if args.end_epoch == -1: args.end_epoch = args.start_epoch assert 0 <= args.start_epoch <= args.end_epoch < cfg.max_epoch for epoch_num in range(args.start_epoch, args.end_epoch + 1): if args.weight_file: weight_file = args.weight_file else: weight_file = "log-of-{}/epoch_{}.pkl".format( os.path.basename(args.file).split(".")[0], epoch_num ) if args.devices > 1: dist_worker = dist.launcher(n_gpus=args.devices)(worker) result_list = dist_worker(current_network, weight_file, args.dataset_dir) result_list = sum(result_list, []) else: result_list = worker(current_network, weight_file, args.dataset_dir) all_results = DetEvaluator.format(result_list, cfg) if args.weight_file: json_path = "{}_{}.json".format( os.path.basename(args.file).split(".")[0], os.path.basename(args.weight_file).split(".")[0], ) else: json_path = "log-of-{}/epoch_{}.json".format( os.path.basename(args.file).split(".")[0], epoch_num ) all_results = json.dumps(all_results) with open(json_path, "w") as fo: fo.write(all_results) logger.info("Save results to %s, start evaluation!", json_path) eval_gt = COCO( os.path.join( args.dataset_dir, cfg.test_dataset["name"], cfg.test_dataset["ann_file"] ) ) eval_dt = eval_gt.loadRes(json_path) cocoEval = COCOeval(eval_gt, eval_dt, iouType="bbox") cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() metrics = [ "AP", "AP@0.5", "AP@0.75", "APs", "APm", "APl", "AR@1", "AR@10", "AR@100", "ARs", "ARm", "ARl", ] logger.info("mmAP".center(32, "-")) for i, m in enumerate(metrics): logger.info("|\t%s\t|\t%.03f\t|", m, cocoEval.stats[i]) logger.info("-" * 32) def worker(current_network, weight_file, dataset_dir): cfg = current_network.Cfg() cfg.backbone_pretrained = False model = current_network.Net(cfg) model.eval() state_dict = mge.load(weight_file) if "state_dict" in state_dict:
state_dict = state_dict["state_dict"] model.load_state_dict(state_dict) evaluator = DetEvaluator(model) test_loader = build_dataloader(dataset_dir, model.cfg) if
dist.get_rank()
megengine.distributed.get_rank
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import argparse import json import os from tqdm import tqdm import megengine as mge import megengine.distributed as dist from megengine.data import DataLoader from official.vision.detection.tools.utils import ( DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file ) logger = mge.get_logger(__name__) logger.setLevel("INFO") def make_parser(): parser = argparse.ArgumentParser() parser.add_argument( "-f", "--file", default="net.py", type=str, help="net description file" ) parser.add_argument( "-w", "--weight_file", default=None, type=str, help="weights file", ) parser.add_argument( "-n", "--devices", default=1, type=int, help="total number of gpus for testing", ) parser.add_argument( "-d", "--dataset_dir", default="/data/datasets", type=str, ) parser.add_argument("-se", "--start_epoch", default=-1, type=int) parser.add_argument("-ee", "--end_epoch", default=-1, type=int) return parser def main(): # pylint: disable=import-outside-toplevel,too-many-branches,too-many-statements from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval parser = make_parser() args = parser.parse_args() current_network = import_from_file(args.file) cfg = current_network.Cfg() if args.weight_file: args.start_epoch = args.end_epoch = -1 else: if args.start_epoch == -1: args.start_epoch = cfg.max_epoch - 1 if args.end_epoch == -1: args.end_epoch = args.start_epoch assert 0 <= args.start_epoch <= args.end_epoch < cfg.max_epoch for epoch_num in range(args.start_epoch, args.end_epoch + 1): if args.weight_file: weight_file = args.weight_file else: weight_file = "log-of-{}/epoch_{}.pkl".format( os.path.basename(args.file).split(".")[0], epoch_num ) if args.devices > 1: dist_worker =
dist.launcher(n_gpus=args.devices)
megengine.distributed.launcher
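The rows here use megengine.distributed.launcher in two forms: called with n_gpus to wrap a worker function (as in the snippet above), and as a bare decorator (as in the observer tests further down). A minimal sketch of both, assuming a hypothetical worker and a 2-GPU host; as the snippet above implies, the wrapped call runs one process per device and gathers each rank's return value into a list:

import megengine.distributed as dist

def worker(tag):
    # executed once per launched process; each process sees its own rank
    return (dist.get_rank(), tag)

results = dist.launcher(n_gpus=2)(worker)("eval")  # e.g. [(0, "eval"), (1, "eval")]

@dist.launcher  # bare decorator form, as used in the observer tests below
def worker_all():
    pass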
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import argparse import json import os from tqdm import tqdm import megengine as mge import megengine.distributed as dist from megengine.data import DataLoader from official.vision.detection.tools.utils import ( DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file ) logger = mge.get_logger(__name__) logger.setLevel("INFO") def make_parser(): parser = argparse.ArgumentParser() parser.add_argument( "-f", "--file", default="net.py", type=str, help="net description file" ) parser.add_argument( "-w", "--weight_file", default=None, type=str, help="weights file", ) parser.add_argument( "-n", "--devices", default=1, type=int, help="total number of gpus for testing", ) parser.add_argument( "-d", "--dataset_dir", default="/data/datasets", type=str, ) parser.add_argument("-se", "--start_epoch", default=-1, type=int) parser.add_argument("-ee", "--end_epoch", default=-1, type=int) return parser def main(): # pylint: disable=import-outside-toplevel,too-many-branches,too-many-statements from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval parser = make_parser() args = parser.parse_args() current_network = import_from_file(args.file) cfg = current_network.Cfg() if args.weight_file: args.start_epoch = args.end_epoch = -1 else: if args.start_epoch == -1: args.start_epoch = cfg.max_epoch - 1 if args.end_epoch == -1: args.end_epoch = args.start_epoch assert 0 <= args.start_epoch <= args.end_epoch < cfg.max_epoch for epoch_num in range(args.start_epoch, args.end_epoch + 1): if args.weight_file: weight_file = args.weight_file else: weight_file = "log-of-{}/epoch_{}.pkl".format( os.path.basename(args.file).split(".")[0], epoch_num ) if args.devices > 1: dist_worker = dist.launcher(n_gpus=args.devices)(worker) result_list = dist_worker(current_network, weight_file, args.dataset_dir) result_list = sum(result_list, []) else: result_list = worker(current_network, weight_file, args.dataset_dir) all_results = DetEvaluator.format(result_list, cfg) if args.weight_file: json_path = "{}_{}.json".format( os.path.basename(args.file).split(".")[0], os.path.basename(args.weight_file).split(".")[0], ) else: json_path = "log-of-{}/epoch_{}.json".format( os.path.basename(args.file).split(".")[0], epoch_num ) all_results = json.dumps(all_results) with open(json_path, "w") as fo: fo.write(all_results) logger.info("Save results to %s, start evaluation!", json_path) eval_gt = COCO( os.path.join( args.dataset_dir, cfg.test_dataset["name"], cfg.test_dataset["ann_file"] ) ) eval_dt = eval_gt.loadRes(json_path) cocoEval = COCOeval(eval_gt, eval_dt, iouType="bbox") cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() metrics = [ "AP", "AP@0.5", "AP@0.75", "APs", "APm", "APl", "AR@1", "AR@10", "AR@100", "ARs", "ARm", "ARl", ] logger.info("mmAP".center(32, "-")) for i, m in enumerate(metrics): logger.info("|\t%s\t|\t%.03f\t|", m, cocoEval.stats[i]) logger.info("-" * 32) def worker(current_network, weight_file, dataset_dir): cfg = current_network.Cfg() cfg.backbone_pretrained = False model = current_network.Net(cfg) model.eval() state_dict = mge.load(weight_file) if "state_dict" in state_dict:
state_dict = state_dict["state_dict"] model.load_state_dict(state_dict) evaluator = DetEvaluator(model) test_loader = build_dataloader(dataset_dir, model.cfg) if dist.get_rank() == 0: test_loader = tqdm(test_loader) result_list = [] for data in test_loader: image, im_info = DetEvaluator.process_inputs( data[0][0], model.cfg.test_image_short_size, model.cfg.test_image_max_size, ) pred_res = evaluator.predict( image=
mge.tensor(image)
megengine.tensor
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import argparse import json import os from tqdm import tqdm import megengine as mge import megengine.distributed as dist from megengine.data import DataLoader from official.vision.detection.tools.utils import ( DetEvaluator, InferenceSampler, PseudoDetectionDataset, import_from_file ) logger = mge.get_logger(__name__) logger.setLevel("INFO") def make_parser(): parser = argparse.ArgumentParser() parser.add_argument( "-f", "--file", default="net.py", type=str, help="net description file" ) parser.add_argument( "-w", "--weight_file", default=None, type=str, help="weights file", ) parser.add_argument( "-n", "--devices", default=1, type=int, help="total number of gpus for testing", ) parser.add_argument( "-d", "--dataset_dir", default="/data/datasets", type=str, ) parser.add_argument("-se", "--start_epoch", default=-1, type=int) parser.add_argument("-ee", "--end_epoch", default=-1, type=int) return parser def main(): # pylint: disable=import-outside-toplevel,too-many-branches,too-many-statements from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval parser = make_parser() args = parser.parse_args() current_network = import_from_file(args.file) cfg = current_network.Cfg() if args.weight_file: args.start_epoch = args.end_epoch = -1 else: if args.start_epoch == -1: args.start_epoch = cfg.max_epoch - 1 if args.end_epoch == -1: args.end_epoch = args.start_epoch assert 0 <= args.start_epoch <= args.end_epoch < cfg.max_epoch for epoch_num in range(args.start_epoch, args.end_epoch + 1): if args.weight_file: weight_file = args.weight_file else: weight_file = "log-of-{}/epoch_{}.pkl".format( os.path.basename(args.file).split(".")[0], epoch_num ) if args.devices > 1: dist_worker = dist.launcher(n_gpus=args.devices)(worker) result_list = dist_worker(current_network, weight_file, args.dataset_dir) result_list = sum(result_list, []) else: result_list = worker(current_network, weight_file, args.dataset_dir) all_results = DetEvaluator.format(result_list, cfg) if args.weight_file: json_path = "{}_{}.json".format( os.path.basename(args.file).split(".")[0], os.path.basename(args.weight_file).split(".")[0], ) else: json_path = "log-of-{}/epoch_{}.json".format( os.path.basename(args.file).split(".")[0], epoch_num ) all_results = json.dumps(all_results) with open(json_path, "w") as fo: fo.write(all_results) logger.info("Save results to %s, start evaluation!", json_path) eval_gt = COCO( os.path.join( args.dataset_dir, cfg.test_dataset["name"], cfg.test_dataset["ann_file"] ) ) eval_dt = eval_gt.loadRes(json_path) cocoEval = COCOeval(eval_gt, eval_dt, iouType="bbox") cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() metrics = [ "AP", "AP@0.5", "AP@0.75", "APs", "APm", "APl", "AR@1", "AR@10", "AR@100", "ARs", "ARm", "ARl", ] logger.info("mmAP".center(32, "-")) for i, m in enumerate(metrics): logger.info("|\t%s\t|\t%.03f\t|", m, cocoEval.stats[i]) logger.info("-" * 32) def worker(current_network, weight_file, dataset_dir): cfg = current_network.Cfg() cfg.backbone_pretrained = False model = current_network.Net(cfg) model.eval() state_dict = mge.load(weight_file) if "state_dict" in state_dict:
state_dict = state_dict["state_dict"] model.load_state_dict(state_dict) evaluator = DetEvaluator(model) test_loader = build_dataloader(dataset_dir, model.cfg) if dist.get_rank() == 0: test_loader = tqdm(test_loader) result_list = [] for data in test_loader: image, im_info = DetEvaluator.process_inputs( data[0][0], model.cfg.test_image_short_size, model.cfg.test_image_max_size, ) pred_res = evaluator.predict( image=mge.tensor(image), im_info=
mge.tensor(im_info)
megengine.tensor
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import numpy as np import pytest from basecls.layers import SE, DropPath, conv2d, gap2d, linear, norm2d, pool2d @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.parametrize("groups", [1, 2]) @pytest.mark.parametrize("bias", [True, False]) def test_conv2d(w_in, w_out, k, stride, dilation, groups, bias): m = conv2d(w_in, w_out, k, stride=stride, dilation=dilation, groups=groups, bias=bias) assert isinstance(m, M.Conv2d) m(
mge.random.normal(size=(2, 4, 8, 8))
megengine.random.normal
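megengine.random.normal recurs throughout the layer tests below as a quick input generator. A minimal sketch of what the call produces, assuming the defaults mean=0 and std=1:

import megengine as mge

# float32 tensor of shape (2, 4, 8, 8), sampled from N(0, 1)
x = mge.random.normal(size=(2, 4, 8, 8))
print(x.shape, x.dtype)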
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import numpy as np import pytest from basecls.layers import SE, DropPath, conv2d, gap2d, linear, norm2d, pool2d @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.parametrize("groups", [1, 2]) @pytest.mark.parametrize("bias", [True, False]) def test_conv2d(w_in, w_out, k, stride, dilation, groups, bias): m = conv2d(w_in, w_out, k, stride=stride, dilation=dilation, groups=groups, bias=bias) assert isinstance(m, M.Conv2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("drop_prob", [0.0, 0.5]) def test_drop_path(drop_prob): m = DropPath(drop_prob) assert isinstance(m, M.Module) m.training = True m(
mge.random.normal(size=(2, 4, 8, 8))
megengine.random.normal
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import numpy as np import pytest from basecls.layers import SE, DropPath, conv2d, gap2d, linear, norm2d, pool2d @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.parametrize("groups", [1, 2]) @pytest.mark.parametrize("bias", [True, False]) def test_conv2d(w_in, w_out, k, stride, dilation, groups, bias): m = conv2d(w_in, w_out, k, stride=stride, dilation=dilation, groups=groups, bias=bias) assert isinstance(m, M.Conv2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("drop_prob", [0.0, 0.5]) def test_drop_path(drop_prob): m = DropPath(drop_prob) assert isinstance(m, M.Module) m.training = True m(mge.random.normal(size=(2, 4, 8, 8))) m.training = False x = np.random.rand(2, 4, 8, 8).astype("float32") y = m(mge.Tensor(x)).numpy() np.testing.assert_allclose(y, x, rtol=1e-4, atol=1e-6) @pytest.mark.parametrize("shape", [1, (7, 7)]) def test_gap2d(shape): m = gap2d(shape) assert isinstance(m, M.AdaptiveAvgPool2d) m(
mge.random.normal(size=(2, 4, 8, 8))
megengine.random.normal
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import numpy as np import pytest from basecls.layers import SE, DropPath, conv2d, gap2d, linear, norm2d, pool2d @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.parametrize("groups", [1, 2]) @pytest.mark.parametrize("bias", [True, False]) def test_conv2d(w_in, w_out, k, stride, dilation, groups, bias): m = conv2d(w_in, w_out, k, stride=stride, dilation=dilation, groups=groups, bias=bias) assert isinstance(m, M.Conv2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("drop_prob", [0.0, 0.5]) def test_drop_path(drop_prob): m = DropPath(drop_prob) assert isinstance(m, M.Module) m.training = True m(mge.random.normal(size=(2, 4, 8, 8))) m.training = False x = np.random.rand(2, 4, 8, 8).astype("float32") y = m(mge.Tensor(x)).numpy() np.testing.assert_allclose(y, x, rtol=1e-4, atol=1e-6) @pytest.mark.parametrize("shape", [1, (7, 7)]) def test_gap2d(shape): m = gap2d(shape) assert isinstance(m, M.AdaptiveAvgPool2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("bias", [True, False]) def test_linear(w_in, w_out, bias): m = linear(w_in, w_out, bias=bias) assert isinstance(m, M.Linear) m(
mge.random.normal(size=(2, 8, 4))
megengine.random.normal
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import numpy as np import pytest from basecls.layers import SE, DropPath, conv2d, gap2d, linear, norm2d, pool2d @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.parametrize("groups", [1, 2]) @pytest.mark.parametrize("bias", [True, False]) def test_conv2d(w_in, w_out, k, stride, dilation, groups, bias): m = conv2d(w_in, w_out, k, stride=stride, dilation=dilation, groups=groups, bias=bias) assert isinstance(m, M.Conv2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("drop_prob", [0.0, 0.5]) def test_drop_path(drop_prob): m = DropPath(drop_prob) assert isinstance(m, M.Module) m.training = True m(mge.random.normal(size=(2, 4, 8, 8))) m.training = False x = np.random.rand(2, 4, 8, 8).astype("float32") y = m(mge.Tensor(x)).numpy() np.testing.assert_allclose(y, x, rtol=1e-4, atol=1e-6) @pytest.mark.parametrize("shape", [1, (7, 7)]) def test_gap2d(shape): m = gap2d(shape) assert isinstance(m, M.AdaptiveAvgPool2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("bias", [True, False]) def test_linear(w_in, w_out, bias): m = linear(w_in, w_out, bias=bias) assert isinstance(m, M.Linear) m(mge.random.normal(size=(2, 8, 4))) # TODO: "GN", "IN" and "LN" need different hyper-parameters @pytest.mark.parametrize("name", [None, "BN", "SyncBN"]) @pytest.mark.parametrize("w_in", [4]) def test_norm2d(name, w_in): m = norm2d(name, w_in) assert isinstance(m, M.Module) m(
mge.random.normal(size=(2, 4, 8, 8))
megengine.random.normal
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import numpy as np import pytest from basecls.layers import SE, DropPath, conv2d, gap2d, linear, norm2d, pool2d @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.parametrize("groups", [1, 2]) @pytest.mark.parametrize("bias", [True, False]) def test_conv2d(w_in, w_out, k, stride, dilation, groups, bias): m = conv2d(w_in, w_out, k, stride=stride, dilation=dilation, groups=groups, bias=bias) assert isinstance(m, M.Conv2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("drop_prob", [0.0, 0.5]) def test_drop_path(drop_prob): m = DropPath(drop_prob) assert isinstance(m, M.Module) m.training = True m(mge.random.normal(size=(2, 4, 8, 8))) m.training = False x = np.random.rand(2, 4, 8, 8).astype("float32") y = m(mge.Tensor(x)).numpy() np.testing.assert_allclose(y, x, rtol=1e-4, atol=1e-6) @pytest.mark.parametrize("shape", [1, (7, 7)]) def test_gap2d(shape): m = gap2d(shape) assert isinstance(m, M.AdaptiveAvgPool2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("bias", [True, False]) def test_linear(w_in, w_out, bias): m = linear(w_in, w_out, bias=bias) assert isinstance(m, M.Linear) m(mge.random.normal(size=(2, 8, 4))) # TODO: "GN", "IN" and "LN" need different hyper-parameters @pytest.mark.parametrize("name", [None, "BN", "SyncBN"]) @pytest.mark.parametrize("w_in", [4]) def test_norm2d(name, w_in): m = norm2d(name, w_in) assert isinstance(m, M.Module) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("name", ["avg", "max"]) def test_pool2d(k, stride, name): m = pool2d(k, stride=stride, name=name) assert isinstance(m, M.Module) m(
mge.random.normal(size=(2, 4, 8, 8))
megengine.random.normal
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import numpy as np import pytest from basecls.layers import SE, DropPath, conv2d, gap2d, linear, norm2d, pool2d @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.parametrize("groups", [1, 2]) @pytest.mark.parametrize("bias", [True, False]) def test_conv2d(w_in, w_out, k, stride, dilation, groups, bias): m = conv2d(w_in, w_out, k, stride=stride, dilation=dilation, groups=groups, bias=bias) assert isinstance(m, M.Conv2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("drop_prob", [0.0, 0.5]) def test_drop_path(drop_prob): m = DropPath(drop_prob) assert isinstance(m, M.Module) m.training = True m(mge.random.normal(size=(2, 4, 8, 8))) m.training = False x = np.random.rand(2, 4, 8, 8).astype("float32") y = m(mge.Tensor(x)).numpy() np.testing.assert_allclose(y, x, rtol=1e-4, atol=1e-6) @pytest.mark.parametrize("shape", [1, (7, 7)]) def test_gap2d(shape): m = gap2d(shape) assert isinstance(m, M.AdaptiveAvgPool2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("bias", [True, False]) def test_linear(w_in, w_out, bias): m = linear(w_in, w_out, bias=bias) assert isinstance(m, M.Linear) m(mge.random.normal(size=(2, 8, 4))) # TODO: "GN", "IN" and "LN" need different hyper-parameters @pytest.mark.parametrize("name", [None, "BN", "SyncBN"]) @pytest.mark.parametrize("w_in", [4]) def test_norm2d(name, w_in): m = norm2d(name, w_in) assert isinstance(m, M.Module) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("name", ["avg", "max"]) def test_pool2d(k, stride, name): m = pool2d(k, stride=stride, name=name) assert isinstance(m, M.Module) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("w_in", [8]) @pytest.mark.parametrize("w_se", [4]) @pytest.mark.parametrize("act_name", ["relu", "silu"]) def test_se(w_in, w_se, act_name): m = SE(w_in, w_se, act_name) assert isinstance(m, M.Module) m(
mge.random.normal(size=(2, 8, 8, 8))
megengine.random.normal
#!/usr/bin/env python3 # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import megengine as mge import megengine.module as M import numpy as np import pytest from basecls.layers import SE, DropPath, conv2d, gap2d, linear, norm2d, pool2d @pytest.mark.parametrize("w_in", [4]) @pytest.mark.parametrize("w_out", [8]) @pytest.mark.parametrize("k", [3, 5]) @pytest.mark.parametrize("stride", [1, 2]) @pytest.mark.parametrize("dilation", [1, 2]) @pytest.mark.parametrize("groups", [1, 2]) @pytest.mark.parametrize("bias", [True, False]) def test_conv2d(w_in, w_out, k, stride, dilation, groups, bias): m = conv2d(w_in, w_out, k, stride=stride, dilation=dilation, groups=groups, bias=bias) assert isinstance(m, M.Conv2d) m(mge.random.normal(size=(2, 4, 8, 8))) @pytest.mark.parametrize("drop_prob", [0.0, 0.5]) def test_drop_path(drop_prob): m = DropPath(drop_prob) assert isinstance(m, M.Module) m.training = True m(mge.random.normal(size=(2, 4, 8, 8))) m.training = False x = np.random.rand(2, 4, 8, 8).astype("float32") y = m(
mge.Tensor(x)
megengine.Tensor
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x =
mge.tensor(x)
megengine.tensor
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x = mge.tensor(x) m =
MinMaxObserver()
megengine.quantization.observer.MinMaxObserver
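As the following row shows, an observer is invoked like a module and keeps running statistics over the tensors it has seen. A minimal round-trip sketch for MinMaxObserver (illustrative values; assumes min_val and max_val are exposed as tensors, which is how the tests read them):

import numpy as np
import megengine as mge
from megengine.quantization.observer import MinMaxObserver

m = MinMaxObserver()
m(mge.tensor(np.array([-1.0, 0.5, 2.0], dtype="float32")))
print(m.min_val.numpy(), m.max_val.numpy())  # -1.0 2.0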
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x = mge.tensor(x) m = MinMaxObserver() m(x) np.testing.assert_allclose(m.min_val.numpy(), np_min) np.testing.assert_allclose(m.max_val.numpy(), np_max) def test_exponential_moving_average_observer(): t = np.random.rand() x1 = np.random.rand(3, 3, 3, 3).astype("float32") x2 = np.random.rand(3, 3, 3, 3).astype("float32") expected_min = x1.min() * t + x2.min() * (1 - t) expected_max = x1.max() * t + x2.max() * (1 - t) m =
ExponentialMovingAverageObserver(momentum=t)
megengine.quantization.observer.ExponentialMovingAverageObserver
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x = mge.tensor(x) m = MinMaxObserver() m(x) np.testing.assert_allclose(m.min_val.numpy(), np_min) np.testing.assert_allclose(m.max_val.numpy(), np_max) def test_exponential_moving_average_observer(): t = np.random.rand() x1 = np.random.rand(3, 3, 3, 3).astype("float32") x2 = np.random.rand(3, 3, 3, 3).astype("float32") expected_min = x1.min() * t + x2.min() * (1 - t) expected_max = x1.max() * t + x2.max() * (1 - t) m = ExponentialMovingAverageObserver(momentum=t) m(mge.tensor(x1, dtype=np.float32)) m(mge.tensor(x2, dtype=np.float32)) np.testing.assert_allclose(m.min_val.numpy(), expected_min) np.testing.assert_allclose(m.max_val.numpy(), expected_max) def test_passive_observer(): q_dict = {"scale": mge.tensor(1.0)} m =
PassiveObserver(q_dict, "qint8")
megengine.quantization.observer.PassiveObserver
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x = mge.tensor(x) m = MinMaxObserver() m(x) np.testing.assert_allclose(m.min_val.numpy(), np_min) np.testing.assert_allclose(m.max_val.numpy(), np_max) def test_exponential_moving_average_observer(): t = np.random.rand() x1 = np.random.rand(3, 3, 3, 3).astype("float32") x2 = np.random.rand(3, 3, 3, 3).astype("float32") expected_min = x1.min() * t + x2.min() * (1 - t) expected_max = x1.max() * t + x2.max() * (1 - t) m = ExponentialMovingAverageObserver(momentum=t) m(mge.tensor(x1, dtype=np.float32)) m(mge.tensor(x2, dtype=np.float32)) np.testing.assert_allclose(m.min_val.numpy(), expected_min) np.testing.assert_allclose(m.max_val.numpy(), expected_max) def test_passive_observer(): q_dict = {"scale": mge.tensor(1.0)} m = PassiveObserver(q_dict, "qint8") assert m.orig_scale == 1.0 assert m.scale == 1.0 m.scale = 2.0 assert m.scale == 2.0 assert m.get_qparams() == {"scale": mge.tensor(2.0)} @pytest.mark.skipif( platform.system() == "Darwin", reason="do not imp GPU mode at macos now" ) @pytest.mark.skipif( platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" ) @pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") @pytest.mark.isolated_distributed def test_sync_min_max_observer(): word_size =
get_device_count_by_fork("gpu")
megengine.distributed.helper.get_device_count_by_fork
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x = mge.tensor(x) m = MinMaxObserver() m(x) np.testing.assert_allclose(m.min_val.numpy(), np_min) np.testing.assert_allclose(m.max_val.numpy(), np_max) def test_exponential_moving_average_observer(): t = np.random.rand() x1 = np.random.rand(3, 3, 3, 3).astype("float32") x2 = np.random.rand(3, 3, 3, 3).astype("float32") expected_min = x1.min() * t + x2.min() * (1 - t) expected_max = x1.max() * t + x2.max() * (1 - t) m = ExponentialMovingAverageObserver(momentum=t) m(mge.tensor(x1, dtype=np.float32)) m(mge.tensor(x2, dtype=np.float32)) np.testing.assert_allclose(m.min_val.numpy(), expected_min) np.testing.assert_allclose(m.max_val.numpy(), expected_max) def test_passive_observer(): q_dict = {"scale": mge.tensor(1.0)} m = PassiveObserver(q_dict, "qint8") assert m.orig_scale == 1.0 assert m.scale == 1.0 m.scale = 2.0 assert m.scale == 2.0 assert m.get_qparams() == {"scale": mge.tensor(2.0)} @pytest.mark.skipif( platform.system() == "Darwin", reason="do not imp GPU mode at macos now" ) @pytest.mark.skipif( platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" ) @pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") @pytest.mark.isolated_distributed def test_sync_min_max_observer(): word_size = get_device_count_by_fork("gpu") x = np.random.rand(3 * word_size, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() @dist.launcher def worker(): rank = dist.get_rank() m = SyncMinMaxObserver() y = mge.tensor(x[rank * 3 : (rank + 1) * 3]) m(y) assert m.min_val == np_min and m.max_val == np_max worker() @pytest.mark.skipif( platform.system() == "Darwin", reason="do not imp GPU mode at macos now" ) @pytest.mark.skipif( platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" ) @pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") @pytest.mark.isolated_distributed def test_sync_exponential_moving_average_observer(): word_size =
get_device_count_by_fork("gpu")
megengine.distributed.helper.get_device_count_by_fork
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError):
Observer("qint8")
megengine.quantization.observer.Observer
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x = mge.tensor(x) m = MinMaxObserver() m(x) np.testing.assert_allclose(m.min_val.numpy(), np_min) np.testing.assert_allclose(m.max_val.numpy(), np_max) def test_exponential_moving_average_observer(): t = np.random.rand() x1 = np.random.rand(3, 3, 3, 3).astype("float32") x2 = np.random.rand(3, 3, 3, 3).astype("float32") expected_min = x1.min() * t + x2.min() * (1 - t) expected_max = x1.max() * t + x2.max() * (1 - t) m = ExponentialMovingAverageObserver(momentum=t) m(
mge.tensor(x1, dtype=np.float32)
megengine.tensor
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x = mge.tensor(x) m = MinMaxObserver() m(x) np.testing.assert_allclose(m.min_val.numpy(), np_min) np.testing.assert_allclose(m.max_val.numpy(), np_max) def test_exponential_moving_average_observer(): t = np.random.rand() x1 = np.random.rand(3, 3, 3, 3).astype("float32") x2 = np.random.rand(3, 3, 3, 3).astype("float32") expected_min = x1.min() * t + x2.min() * (1 - t) expected_max = x1.max() * t + x2.max() * (1 - t) m = ExponentialMovingAverageObserver(momentum=t) m(mge.tensor(x1, dtype=np.float32)) m(
mge.tensor(x2, dtype=np.float32)
megengine.tensor
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x = mge.tensor(x) m = MinMaxObserver() m(x) np.testing.assert_allclose(m.min_val.numpy(), np_min) np.testing.assert_allclose(m.max_val.numpy(), np_max) def test_exponential_moving_average_observer(): t = np.random.rand() x1 = np.random.rand(3, 3, 3, 3).astype("float32") x2 = np.random.rand(3, 3, 3, 3).astype("float32") expected_min = x1.min() * t + x2.min() * (1 - t) expected_max = x1.max() * t + x2.max() * (1 - t) m = ExponentialMovingAverageObserver(momentum=t) m(mge.tensor(x1, dtype=np.float32)) m(mge.tensor(x2, dtype=np.float32)) np.testing.assert_allclose(m.min_val.numpy(), expected_min) np.testing.assert_allclose(m.max_val.numpy(), expected_max) def test_passive_observer(): q_dict = {"scale":
mge.tensor(1.0)
megengine.tensor
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x = mge.tensor(x) m = MinMaxObserver() m(x) np.testing.assert_allclose(m.min_val.numpy(), np_min) np.testing.assert_allclose(m.max_val.numpy(), np_max) def test_exponential_moving_average_observer(): t = np.random.rand() x1 = np.random.rand(3, 3, 3, 3).astype("float32") x2 = np.random.rand(3, 3, 3, 3).astype("float32") expected_min = x1.min() * t + x2.min() * (1 - t) expected_max = x1.max() * t + x2.max() * (1 - t) m = ExponentialMovingAverageObserver(momentum=t) m(mge.tensor(x1, dtype=np.float32)) m(mge.tensor(x2, dtype=np.float32)) np.testing.assert_allclose(m.min_val.numpy(), expected_min) np.testing.assert_allclose(m.max_val.numpy(), expected_max) def test_passive_observer(): q_dict = {"scale": mge.tensor(1.0)} m = PassiveObserver(q_dict, "qint8") assert m.orig_scale == 1.0 assert m.scale == 1.0 m.scale = 2.0 assert m.scale == 2.0 assert m.get_qparams() == {"scale": mge.tensor(2.0)} @pytest.mark.skipif( platform.system() == "Darwin", reason="do not imp GPU mode at macos now" ) @pytest.mark.skipif( platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" ) @pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") @pytest.mark.isolated_distributed def test_sync_min_max_observer(): word_size = get_device_count_by_fork("gpu") x = np.random.rand(3 * word_size, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() @dist.launcher def worker(): rank =
dist.get_rank()
megengine.distributed.get_rank
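Inside a launched worker, megengine.distributed.get_rank identifies the current process, which the sync-observer tests use to shard one global array across ranks. A minimal sketch of that slicing pattern (3 samples per rank, matching the test above; outside a launched context the rank is simply 0):

import numpy as np
import megengine.distributed as dist

x = np.random.rand(6, 3, 3, 3).astype("float32")  # 2 ranks x 3 samples
rank = dist.get_rank()
shard = x[rank * 3 : (rank + 1) * 3]  # each rank observes a disjoint slice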
import platform import numpy as np import pytest import megengine as mge import megengine.distributed as dist from megengine.distributed.helper import get_device_count_by_fork from megengine.quantization.observer import ( ExponentialMovingAverageObserver, MinMaxObserver, Observer, PassiveObserver, SyncExponentialMovingAverageObserver, SyncMinMaxObserver, ) def test_observer(): with pytest.raises(TypeError): Observer("qint8") def test_min_max_observer(): x = np.random.rand(3, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() x = mge.tensor(x) m = MinMaxObserver() m(x) np.testing.assert_allclose(m.min_val.numpy(), np_min) np.testing.assert_allclose(m.max_val.numpy(), np_max) def test_exponential_moving_average_observer(): t = np.random.rand() x1 = np.random.rand(3, 3, 3, 3).astype("float32") x2 = np.random.rand(3, 3, 3, 3).astype("float32") expected_min = x1.min() * t + x2.min() * (1 - t) expected_max = x1.max() * t + x2.max() * (1 - t) m = ExponentialMovingAverageObserver(momentum=t) m(mge.tensor(x1, dtype=np.float32)) m(mge.tensor(x2, dtype=np.float32)) np.testing.assert_allclose(m.min_val.numpy(), expected_min) np.testing.assert_allclose(m.max_val.numpy(), expected_max) def test_passive_observer(): q_dict = {"scale": mge.tensor(1.0)} m = PassiveObserver(q_dict, "qint8") assert m.orig_scale == 1.0 assert m.scale == 1.0 m.scale = 2.0 assert m.scale == 2.0 assert m.get_qparams() == {"scale": mge.tensor(2.0)} @pytest.mark.skipif( platform.system() == "Darwin", reason="do not imp GPU mode at macos now" ) @pytest.mark.skipif( platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM" ) @pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device") @pytest.mark.isolated_distributed def test_sync_min_max_observer(): word_size = get_device_count_by_fork("gpu") x = np.random.rand(3 * word_size, 3, 3, 3).astype("float32") np_min, np_max = x.min(), x.max() @dist.launcher def worker(): rank = dist.get_rank() m =
SyncMinMaxObserver()
megengine.quantization.observer.SyncMinMaxObserver