prompt
stringlengths 94
42.6k
| completion
stringlengths 6
120
| api
stringlengths 14
68
|
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
def test_logaddexp():
x = np.random.randn(2, 100)
y = np.random.randn(2, 100)
xx = tensor(x)
yy = | tensor(y) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
def test_logaddexp():
x = np.random.randn(2, 100)
y = np.random.randn(2, 100)
xx = tensor(x)
yy = tensor(y)
out_np = np.log(np.exp(x) + np.exp(y))
out_mge = | F.logaddexp(xx, yy) | megengine.functional.logaddexp |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = | tensor(case) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
def test_logaddexp():
x = np.random.randn(2, 100)
y = np.random.randn(2, 100)
xx = tensor(x)
yy = tensor(y)
out_np = np.log(np.exp(x) + np.exp(y))
out_mge = F.logaddexp(xx, yy)
np.testing.assert_almost_equal(out_np, out_mge.numpy(), decimal=6)
def test_qadd():
inp_scale = 0.5
outp_scale = 0.2
x = np.arange(6).reshape(2, 3).astype("float32")
y = np.arange(6).reshape(2, 3).astype("float32")
x = tensor(x, dtype= | dtype.qint8(inp_scale) | megengine.core.tensor.dtype.qint8 |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
def test_logaddexp():
x = np.random.randn(2, 100)
y = np.random.randn(2, 100)
xx = tensor(x)
yy = tensor(y)
out_np = np.log(np.exp(x) + np.exp(y))
out_mge = F.logaddexp(xx, yy)
np.testing.assert_almost_equal(out_np, out_mge.numpy(), decimal=6)
def test_qadd():
inp_scale = 0.5
outp_scale = 0.2
x = np.arange(6).reshape(2, 3).astype("float32")
y = np.arange(6).reshape(2, 3).astype("float32")
x = tensor(x, dtype=dtype.qint8(inp_scale))
y = tensor(y, dtype= | dtype.qint8(inp_scale) | megengine.core.tensor.dtype.qint8 |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
def test_logaddexp():
x = np.random.randn(2, 100)
y = np.random.randn(2, 100)
xx = tensor(x)
yy = tensor(y)
out_np = np.log(np.exp(x) + np.exp(y))
out_mge = F.logaddexp(xx, yy)
np.testing.assert_almost_equal(out_np, out_mge.numpy(), decimal=6)
def test_qadd():
inp_scale = 0.5
outp_scale = 0.2
x = np.arange(6).reshape(2, 3).astype("float32")
y = np.arange(6).reshape(2, 3).astype("float32")
x = tensor(x, dtype=dtype.qint8(inp_scale))
y = tensor(y, dtype=dtype.qint8(inp_scale))
result_mge = F.elemwise._elemwise_multi_type(
x, y, mode="qadd", dtype= | dtype.qint8(outp_scale) | megengine.core.tensor.dtype.qint8 |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
def test_logaddexp():
x = np.random.randn(2, 100)
y = np.random.randn(2, 100)
xx = tensor(x)
yy = tensor(y)
out_np = np.log(np.exp(x) + np.exp(y))
out_mge = F.logaddexp(xx, yy)
np.testing.assert_almost_equal(out_np, out_mge.numpy(), decimal=6)
def test_qadd():
inp_scale = 0.5
outp_scale = 0.2
x = np.arange(6).reshape(2, 3).astype("float32")
y = np.arange(6).reshape(2, 3).astype("float32")
x = tensor(x, dtype=dtype.qint8(inp_scale))
y = tensor(y, dtype=dtype.qint8(inp_scale))
result_mge = F.elemwise._elemwise_multi_type(
x, y, mode="qadd", dtype=dtype.qint8(outp_scale)
)
result_mge = result_mge.astype("float32").numpy()
result_expect = x.astype("float32").numpy() + y.astype("float32").numpy()
np.testing.assert_almost_equal(result_mge, result_expect, decimal=6)
def test_int32_input():
x = tensor(np.array([1, 2, 3, 4, 5]), dtype="int32")
for op_name in elemwise.__all__:
op = getattr(elemwise, op_name)
nargs = op.__code__.co_argcount
if op_name == "clip":
inp = (x, 0, 1)
elif op_name.endswith("_shift"):
inp = (x, 1)
elif op_name.startswith("logical_"):
continue
else:
inp = (x,) * nargs
y = op(*inp)
y.numpy()
@pytest.mark.parametrize("is_trace", [True, False])
def test_empty_tensor(is_trace):
binary_func = []
unary_func = []
for op_name in elemwise.__all__:
op = getattr(elemwise, op_name)
nargs = op.__code__.co_argcount
if op_name == "clip":
unary_func.append(["clip", lambda x, f=op: f(x, lower=0, upper=1)])
elif op_name.endswith("_shift"):
unary_func.append(
[op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="int32"), 1)]
)
elif op_name.startswith("logical_"): # logical_xxx op only accept boolean type
if nargs == 1:
unary_func.append(
[op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="bool"))]
)
else:
assert nargs == 2
binary_func.append(
[
op_name,
lambda x, y, f=op: f(
tensor(x.numpy(), dtype="bool"),
tensor(y.numpy(), dtype="bool"),
),
]
)
elif nargs == 1:
unary_func.append([op_name, op])
elif nargs == 2:
binary_func.append([op_name, op])
else:
raise NotImplementedError("nargs {}".format(nargs))
def run_test(func, args, ref_shape, is_trace, sym=False):
args = [ | tensor(t, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose( | F.abs(-3.0) | megengine.functional.abs |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert | Elemwise(mode=key) | megengine.functional.elemwise.Elemwise |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == | Elemwise(mode=mode) | megengine.functional.elemwise.Elemwise |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
| F.mul(-3.0, -4.0) | megengine.functional.mul |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh( | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh( | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh( | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh( | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh( | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish( | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu( | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid( | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, ( | F.logical_not(xx) | megengine.functional.logical_not |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, | F.logical_and(xx, yy) | megengine.functional.logical_and |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, | F.logical_or(xx, yy) | megengine.functional.logical_or |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
np.testing.assert_equal(x ^ y, | F.logical_xor(xx, yy) | megengine.functional.logical_xor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
def test_logaddexp():
x = np.random.randn(2, 100)
y = np.random.randn(2, 100)
xx = tensor(x)
yy = tensor(y)
out_np = np.log(np.exp(x) + np.exp(y))
out_mge = F.logaddexp(xx, yy)
np.testing.assert_almost_equal(out_np, out_mge.numpy(), decimal=6)
def test_qadd():
inp_scale = 0.5
outp_scale = 0.2
x = np.arange(6).reshape(2, 3).astype("float32")
y = np.arange(6).reshape(2, 3).astype("float32")
x = tensor(x, dtype=dtype.qint8(inp_scale))
y = tensor(y, dtype=dtype.qint8(inp_scale))
result_mge = F.elemwise._elemwise_multi_type(
x, y, mode="qadd", dtype=dtype.qint8(outp_scale)
)
result_mge = result_mge.astype("float32").numpy()
result_expect = x.astype("float32").numpy() + y.astype("float32").numpy()
np.testing.assert_almost_equal(result_mge, result_expect, decimal=6)
def test_int32_input():
x = tensor(np.array([1, 2, 3, 4, 5]), dtype="int32")
for op_name in elemwise.__all__:
op = getattr(elemwise, op_name)
nargs = op.__code__.co_argcount
if op_name == "clip":
inp = (x, 0, 1)
elif op_name.endswith("_shift"):
inp = (x, 1)
elif op_name.startswith("logical_"):
continue
else:
inp = (x,) * nargs
y = op(*inp)
y.numpy()
@pytest.mark.parametrize("is_trace", [True, False])
def test_empty_tensor(is_trace):
binary_func = []
unary_func = []
for op_name in elemwise.__all__:
op = getattr(elemwise, op_name)
nargs = op.__code__.co_argcount
if op_name == "clip":
unary_func.append(["clip", lambda x, f=op: f(x, lower=0, upper=1)])
elif op_name.endswith("_shift"):
unary_func.append(
[op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="int32"), 1)]
)
elif op_name.startswith("logical_"): # logical_xxx op only accept boolean type
if nargs == 1:
unary_func.append(
[op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="bool"))]
)
else:
assert nargs == 2
binary_func.append(
[
op_name,
lambda x, y, f=op: f(
tensor(x.numpy(), dtype="bool"),
tensor(y.numpy(), dtype="bool"),
),
]
)
elif nargs == 1:
unary_func.append([op_name, op])
elif nargs == 2:
binary_func.append([op_name, op])
else:
raise NotImplementedError("nargs {}".format(nargs))
def run_test(func, args, ref_shape, is_trace, sym=False):
args = [tensor(t, dtype="float32") for t in args]
if is_trace:
func = | trace(symbolic=sym) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs( | tensor([-3.0, -4.0, -5.0]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul( | tensor([3.0, 4.0]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, | tensor([3.0, 4.0]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul( | tensor([3.0, 4.0]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), | tensor([3.0, 4.0]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div( | tensor([3.0, 4.0]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
( | tensor([3, 4]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div( | tensor([-5.0, -7.0]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
( | tensor([-5, -7]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose( | F.sign(x) | megengine.functional.sign |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip( | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip( | tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan( | tensor(case) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf( | tensor(case) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = | F.nn.cross_entropy(data, label) | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = | F.nn.cross_entropy(data, label) | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = | F.nn.cross_entropy(data, label) | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = | tensor(logits, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = | tensor(label, dtype="int32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = | tensor(logits[perm], dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = | tensor(label[perm], dtype="int32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = | F.nn.cross_entropy(logits, label, reduction="none") | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = | F.nn.cross_entropy(logits_perm, label_perm, reduction="none") | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = | F.nn.cross_entropy(logits, label, reduction="sum") | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = | F.nn.cross_entropy(logits, label, reduction="mean") | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
loss_ls = | F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1) | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy( | tensor(x, "float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), | tensor(y, "int32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
loss_ls = F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1)
loss_ls_none_reduce = F.nn.cross_entropy(
logits, label, reduction="none", label_smooth=0.1
)
np.testing.assert_allclose(
loss_ls.numpy(), loss_ls_none_reduce.numpy().mean(), rtol=2e-7
)
with pytest.raises(ValueError):
| F.nn.cross_entropy(logits, label, reduction="MEAN") | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
loss_ls = F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1)
loss_ls_none_reduce = F.nn.cross_entropy(
logits, label, reduction="none", label_smooth=0.1
)
np.testing.assert_allclose(
loss_ls.numpy(), loss_ls_none_reduce.numpy().mean(), rtol=2e-7
)
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="MEAN")
with pytest.raises(ValueError):
| F.nn.cross_entropy(logits, label, reduction="max") | megengine.functional.nn.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
loss_ls = F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1)
loss_ls_none_reduce = F.nn.cross_entropy(
logits, label, reduction="none", label_smooth=0.1
)
np.testing.assert_allclose(
loss_ls.numpy(), loss_ls_none_reduce.numpy().mean(), rtol=2e-7
)
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="MEAN")
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="max")
def ctc_nll_naive_npy(
pred,
pred_lengths,
label,
label_lengths,
blank=0,
reduction="mean",
time_major=False,
):
"""naive :func:`ctc_nll` using numpy arrays. Used for testing and helping
our user to understand how CTC works. Only ``LABEL_COMPACT`` mode is
supported."""
pred = np.asarray(pred, dtype=np.float32)
pred_lengths = np.asarray(pred_lengths, dtype=np.int8)
label = np.asarray(label, dtype=np.int32)
label_lengths = np.asarray(label_lengths, dtype=np.int32)
if time_major:
pred = np.transpose(pred, (1, 0, 2))
# pred in (N, T, P) format
batch_size, time_len, nr_class = pred.shape
assert pred_lengths.shape == (batch_size,) and pred_lengths.max() <= pred.shape[1]
assert label_lengths.shape == (batch_size,)
assert label.shape == (label_lengths.sum(),) and label.max() < nr_class
ret = np.empty((batch_size,), dtype=np.float32)
label_start = 0
for i in range(batch_size):
label_end = label_start + label_lengths[i]
ret[i] = _ctc_npy_single_seq(
pred[i][: pred_lengths[i]], label[label_start:label_end], blank
)
label_start = label_end
if reduction == "mean":
return (ret / label_lengths).mean()
elif reduction == "sum":
return ret.sum()
elif reduction == "none":
return ret
else:
raise ValueError("{} is not a valid value for reduction".format(reduction))
def _ctc_npy_single_seq(pred, label, blank):
def safelog(x):
eps = np.finfo(x.dtype).tiny
return np.log(np.maximum(x, eps))
def log_sum_exp(x, y):
x, y = np.maximum(x, y), np.minimum(x, y)
return x + np.log1p(np.exp(y - x))
assert np.abs(pred.sum(axis=1) - 1).max() <= 1e-3
len_pred, alphabet_size = pred.shape
(len_label,) = label.shape
len_ex_label = len_label * 2 + 1
ex_label = (np.zeros(len_ex_label)).astype(np.int32) + blank
ex_label[1::2] = label
prob = np.zeros(len_ex_label, dtype=np.float32)
prob[0] = pred[0][ex_label[0]]
prob[1] = pred[0][ex_label[1]]
prob = safelog(prob) # compute on log scale
ex_label_pmask = ex_label[2:] != ex_label[:-2]
for t in range(1, len_pred):
# enter loop: prob[i] = log(p(pred[:t+1], label[:i+1]))
new_prob = prob.copy()
new_prob[1:] = log_sum_exp(new_prob[1:], prob[:-1])
new_prob[2:] = (
new_prob[2:] * (1 - ex_label_pmask)
+ log_sum_exp(new_prob[2:], prob[:-2]) * ex_label_pmask
)
new_prob += safelog(pred[t, ex_label])
prob = new_prob
return -log_sum_exp(prob[-1], prob[-2])
def test_ctc_loss():
def test_func(T, C, N):
input = np.random.randn(T, N, C)
input = F.softmax(tensor(input), axis=-1).numpy()
input_lengths = np.ones(N, dtype=np.int32) * T
target_lengths = np.random.randint(low=1, high=T + 1, size=(N,), dtype=np.int32)
target = np.random.randint(
low=1, high=C, size=(sum(target_lengths)), dtype=np.int32
)
input_mge = | tensor(input) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
loss_ls = F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1)
loss_ls_none_reduce = F.nn.cross_entropy(
logits, label, reduction="none", label_smooth=0.1
)
np.testing.assert_allclose(
loss_ls.numpy(), loss_ls_none_reduce.numpy().mean(), rtol=2e-7
)
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="MEAN")
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="max")
def ctc_nll_naive_npy(
pred,
pred_lengths,
label,
label_lengths,
blank=0,
reduction="mean",
time_major=False,
):
"""naive :func:`ctc_nll` using numpy arrays. Used for testing and helping
our user to understand how CTC works. Only ``LABEL_COMPACT`` mode is
supported."""
pred = np.asarray(pred, dtype=np.float32)
pred_lengths = np.asarray(pred_lengths, dtype=np.int8)
label = np.asarray(label, dtype=np.int32)
label_lengths = np.asarray(label_lengths, dtype=np.int32)
if time_major:
pred = np.transpose(pred, (1, 0, 2))
# pred in (N, T, P) format
batch_size, time_len, nr_class = pred.shape
assert pred_lengths.shape == (batch_size,) and pred_lengths.max() <= pred.shape[1]
assert label_lengths.shape == (batch_size,)
assert label.shape == (label_lengths.sum(),) and label.max() < nr_class
ret = np.empty((batch_size,), dtype=np.float32)
label_start = 0
for i in range(batch_size):
label_end = label_start + label_lengths[i]
ret[i] = _ctc_npy_single_seq(
pred[i][: pred_lengths[i]], label[label_start:label_end], blank
)
label_start = label_end
if reduction == "mean":
return (ret / label_lengths).mean()
elif reduction == "sum":
return ret.sum()
elif reduction == "none":
return ret
else:
raise ValueError("{} is not a valid value for reduction".format(reduction))
def _ctc_npy_single_seq(pred, label, blank):
def safelog(x):
eps = np.finfo(x.dtype).tiny
return np.log(np.maximum(x, eps))
def log_sum_exp(x, y):
x, y = np.maximum(x, y), np.minimum(x, y)
return x + np.log1p(np.exp(y - x))
assert np.abs(pred.sum(axis=1) - 1).max() <= 1e-3
len_pred, alphabet_size = pred.shape
(len_label,) = label.shape
len_ex_label = len_label * 2 + 1
ex_label = (np.zeros(len_ex_label)).astype(np.int32) + blank
ex_label[1::2] = label
prob = np.zeros(len_ex_label, dtype=np.float32)
prob[0] = pred[0][ex_label[0]]
prob[1] = pred[0][ex_label[1]]
prob = safelog(prob) # compute on log scale
ex_label_pmask = ex_label[2:] != ex_label[:-2]
for t in range(1, len_pred):
# enter loop: prob[i] = log(p(pred[:t+1], label[:i+1]))
new_prob = prob.copy()
new_prob[1:] = log_sum_exp(new_prob[1:], prob[:-1])
new_prob[2:] = (
new_prob[2:] * (1 - ex_label_pmask)
+ log_sum_exp(new_prob[2:], prob[:-2]) * ex_label_pmask
)
new_prob += safelog(pred[t, ex_label])
prob = new_prob
return -log_sum_exp(prob[-1], prob[-2])
def test_ctc_loss():
def test_func(T, C, N):
input = np.random.randn(T, N, C)
input = F.softmax(tensor(input), axis=-1).numpy()
input_lengths = np.ones(N, dtype=np.int32) * T
target_lengths = np.random.randint(low=1, high=T + 1, size=(N,), dtype=np.int32)
target = np.random.randint(
low=1, high=C, size=(sum(target_lengths)), dtype=np.int32
)
input_mge = tensor(input)
input_lengths_mge = | tensor(input_lengths) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
loss_ls = F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1)
loss_ls_none_reduce = F.nn.cross_entropy(
logits, label, reduction="none", label_smooth=0.1
)
np.testing.assert_allclose(
loss_ls.numpy(), loss_ls_none_reduce.numpy().mean(), rtol=2e-7
)
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="MEAN")
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="max")
def ctc_nll_naive_npy(
pred,
pred_lengths,
label,
label_lengths,
blank=0,
reduction="mean",
time_major=False,
):
"""naive :func:`ctc_nll` using numpy arrays. Used for testing and helping
our user to understand how CTC works. Only ``LABEL_COMPACT`` mode is
supported."""
pred = np.asarray(pred, dtype=np.float32)
pred_lengths = np.asarray(pred_lengths, dtype=np.int8)
label = np.asarray(label, dtype=np.int32)
label_lengths = np.asarray(label_lengths, dtype=np.int32)
if time_major:
pred = np.transpose(pred, (1, 0, 2))
# pred in (N, T, P) format
batch_size, time_len, nr_class = pred.shape
assert pred_lengths.shape == (batch_size,) and pred_lengths.max() <= pred.shape[1]
assert label_lengths.shape == (batch_size,)
assert label.shape == (label_lengths.sum(),) and label.max() < nr_class
ret = np.empty((batch_size,), dtype=np.float32)
label_start = 0
for i in range(batch_size):
label_end = label_start + label_lengths[i]
ret[i] = _ctc_npy_single_seq(
pred[i][: pred_lengths[i]], label[label_start:label_end], blank
)
label_start = label_end
if reduction == "mean":
return (ret / label_lengths).mean()
elif reduction == "sum":
return ret.sum()
elif reduction == "none":
return ret
else:
raise ValueError("{} is not a valid value for reduction".format(reduction))
def _ctc_npy_single_seq(pred, label, blank):
def safelog(x):
eps = np.finfo(x.dtype).tiny
return np.log(np.maximum(x, eps))
def log_sum_exp(x, y):
x, y = np.maximum(x, y), np.minimum(x, y)
return x + np.log1p(np.exp(y - x))
assert np.abs(pred.sum(axis=1) - 1).max() <= 1e-3
len_pred, alphabet_size = pred.shape
(len_label,) = label.shape
len_ex_label = len_label * 2 + 1
ex_label = (np.zeros(len_ex_label)).astype(np.int32) + blank
ex_label[1::2] = label
prob = np.zeros(len_ex_label, dtype=np.float32)
prob[0] = pred[0][ex_label[0]]
prob[1] = pred[0][ex_label[1]]
prob = safelog(prob) # compute on log scale
ex_label_pmask = ex_label[2:] != ex_label[:-2]
for t in range(1, len_pred):
# enter loop: prob[i] = log(p(pred[:t+1], label[:i+1]))
new_prob = prob.copy()
new_prob[1:] = log_sum_exp(new_prob[1:], prob[:-1])
new_prob[2:] = (
new_prob[2:] * (1 - ex_label_pmask)
+ log_sum_exp(new_prob[2:], prob[:-2]) * ex_label_pmask
)
new_prob += safelog(pred[t, ex_label])
prob = new_prob
return -log_sum_exp(prob[-1], prob[-2])
def test_ctc_loss():
def test_func(T, C, N):
input = np.random.randn(T, N, C)
input = F.softmax(tensor(input), axis=-1).numpy()
input_lengths = np.ones(N, dtype=np.int32) * T
target_lengths = np.random.randint(low=1, high=T + 1, size=(N,), dtype=np.int32)
target = np.random.randint(
low=1, high=C, size=(sum(target_lengths)), dtype=np.int32
)
input_mge = tensor(input)
input_lengths_mge = tensor(input_lengths)
target_mge = | tensor(target) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
loss_ls = F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1)
loss_ls_none_reduce = F.nn.cross_entropy(
logits, label, reduction="none", label_smooth=0.1
)
np.testing.assert_allclose(
loss_ls.numpy(), loss_ls_none_reduce.numpy().mean(), rtol=2e-7
)
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="MEAN")
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="max")
def ctc_nll_naive_npy(
pred,
pred_lengths,
label,
label_lengths,
blank=0,
reduction="mean",
time_major=False,
):
"""naive :func:`ctc_nll` using numpy arrays. Used for testing and helping
our user to understand how CTC works. Only ``LABEL_COMPACT`` mode is
supported."""
pred = np.asarray(pred, dtype=np.float32)
pred_lengths = np.asarray(pred_lengths, dtype=np.int8)
label = np.asarray(label, dtype=np.int32)
label_lengths = np.asarray(label_lengths, dtype=np.int32)
if time_major:
pred = np.transpose(pred, (1, 0, 2))
# pred in (N, T, P) format
batch_size, time_len, nr_class = pred.shape
assert pred_lengths.shape == (batch_size,) and pred_lengths.max() <= pred.shape[1]
assert label_lengths.shape == (batch_size,)
assert label.shape == (label_lengths.sum(),) and label.max() < nr_class
ret = np.empty((batch_size,), dtype=np.float32)
label_start = 0
for i in range(batch_size):
label_end = label_start + label_lengths[i]
ret[i] = _ctc_npy_single_seq(
pred[i][: pred_lengths[i]], label[label_start:label_end], blank
)
label_start = label_end
if reduction == "mean":
return (ret / label_lengths).mean()
elif reduction == "sum":
return ret.sum()
elif reduction == "none":
return ret
else:
raise ValueError("{} is not a valid value for reduction".format(reduction))
def _ctc_npy_single_seq(pred, label, blank):
def safelog(x):
eps = np.finfo(x.dtype).tiny
return np.log(np.maximum(x, eps))
def log_sum_exp(x, y):
x, y = np.maximum(x, y), np.minimum(x, y)
return x + np.log1p(np.exp(y - x))
assert np.abs(pred.sum(axis=1) - 1).max() <= 1e-3
len_pred, alphabet_size = pred.shape
(len_label,) = label.shape
len_ex_label = len_label * 2 + 1
ex_label = (np.zeros(len_ex_label)).astype(np.int32) + blank
ex_label[1::2] = label
prob = np.zeros(len_ex_label, dtype=np.float32)
prob[0] = pred[0][ex_label[0]]
prob[1] = pred[0][ex_label[1]]
prob = safelog(prob) # compute on log scale
ex_label_pmask = ex_label[2:] != ex_label[:-2]
for t in range(1, len_pred):
# enter loop: prob[i] = log(p(pred[:t+1], label[:i+1]))
new_prob = prob.copy()
new_prob[1:] = log_sum_exp(new_prob[1:], prob[:-1])
new_prob[2:] = (
new_prob[2:] * (1 - ex_label_pmask)
+ log_sum_exp(new_prob[2:], prob[:-2]) * ex_label_pmask
)
new_prob += safelog(pred[t, ex_label])
prob = new_prob
return -log_sum_exp(prob[-1], prob[-2])
def test_ctc_loss():
def test_func(T, C, N):
input = np.random.randn(T, N, C)
input = F.softmax(tensor(input), axis=-1).numpy()
input_lengths = np.ones(N, dtype=np.int32) * T
target_lengths = np.random.randint(low=1, high=T + 1, size=(N,), dtype=np.int32)
target = np.random.randint(
low=1, high=C, size=(sum(target_lengths)), dtype=np.int32
)
input_mge = tensor(input)
input_lengths_mge = tensor(input_lengths)
target_mge = tensor(target)
target_lengths_mge = | tensor(target_lengths) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = | tensor([[0, 50], [0, -150]]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = | tensor([1, 0]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = | tensor([0, 1]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
def test_cross_entropy_with_logits():
data = tensor([[0, 50], [0, -150]]).astype(np.float32)
label = tensor([1, 0]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
label = tensor([0, 1]).astype(np.int32)
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 100)
label = np.array([1, 0])
loss = F.nn.cross_entropy(data, label)
np.testing.assert_allclose(loss.numpy(), 0.0)
def test_cross_entropy():
def softmax(x):
x = np.exp(x)
x /= x.sum(1, keepdims=True)
return x
def ref(x, y):
return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])
x = (np.random.rand(5, 10) - 0.5) * 4
y = np.random.randint(10, size=(5,))
for i in range(len(x)):
x[i, y[i]] += np.random.rand() * 2
x = softmax(x)
l_ref = ref(x, y)
l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
np.testing.assert_allclose(l.numpy(), l_ref)
def test_cross_entropy_reduction():
logits = np.random.randn(16, 10)
label = np.random.randint(10, size=[16])
logits = tensor(logits, dtype="float32")
label = tensor(label, dtype="int32")
perm = np.random.permutation(16)
logits_perm = tensor(logits[perm], dtype="float32")
label_perm = tensor(label[perm], dtype="int32")
loss = F.nn.cross_entropy(logits, label, reduction="none")
loss_perm = F.nn.cross_entropy(logits_perm, label_perm, reduction="none")
np.testing.assert_allclose(loss.numpy()[perm], loss_perm.numpy())
loss_sum = F.nn.cross_entropy(logits, label, reduction="sum")
np.testing.assert_allclose(loss.numpy().sum(), loss_sum.numpy(), rtol=2e-7)
loss_mean = F.nn.cross_entropy(logits, label, reduction="mean")
np.testing.assert_allclose(loss_mean.numpy(), loss_sum.numpy() / 16)
loss_ls = F.nn.cross_entropy(logits, label, reduction="mean", label_smooth=0.1)
loss_ls_none_reduce = F.nn.cross_entropy(
logits, label, reduction="none", label_smooth=0.1
)
np.testing.assert_allclose(
loss_ls.numpy(), loss_ls_none_reduce.numpy().mean(), rtol=2e-7
)
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="MEAN")
with pytest.raises(ValueError):
F.nn.cross_entropy(logits, label, reduction="max")
def ctc_nll_naive_npy(
pred,
pred_lengths,
label,
label_lengths,
blank=0,
reduction="mean",
time_major=False,
):
"""naive :func:`ctc_nll` using numpy arrays. Used for testing and helping
our user to understand how CTC works. Only ``LABEL_COMPACT`` mode is
supported."""
pred = np.asarray(pred, dtype=np.float32)
pred_lengths = np.asarray(pred_lengths, dtype=np.int8)
label = np.asarray(label, dtype=np.int32)
label_lengths = np.asarray(label_lengths, dtype=np.int32)
if time_major:
pred = np.transpose(pred, (1, 0, 2))
# pred in (N, T, P) format
batch_size, time_len, nr_class = pred.shape
assert pred_lengths.shape == (batch_size,) and pred_lengths.max() <= pred.shape[1]
assert label_lengths.shape == (batch_size,)
assert label.shape == (label_lengths.sum(),) and label.max() < nr_class
ret = np.empty((batch_size,), dtype=np.float32)
label_start = 0
for i in range(batch_size):
label_end = label_start + label_lengths[i]
ret[i] = _ctc_npy_single_seq(
pred[i][: pred_lengths[i]], label[label_start:label_end], blank
)
label_start = label_end
if reduction == "mean":
return (ret / label_lengths).mean()
elif reduction == "sum":
return ret.sum()
elif reduction == "none":
return ret
else:
raise ValueError("{} is not a valid value for reduction".format(reduction))
def _ctc_npy_single_seq(pred, label, blank):
def safelog(x):
eps = np.finfo(x.dtype).tiny
return np.log(np.maximum(x, eps))
def log_sum_exp(x, y):
x, y = np.maximum(x, y), np.minimum(x, y)
return x + np.log1p(np.exp(y - x))
assert np.abs(pred.sum(axis=1) - 1).max() <= 1e-3
len_pred, alphabet_size = pred.shape
(len_label,) = label.shape
len_ex_label = len_label * 2 + 1
ex_label = (np.zeros(len_ex_label)).astype(np.int32) + blank
ex_label[1::2] = label
prob = np.zeros(len_ex_label, dtype=np.float32)
prob[0] = pred[0][ex_label[0]]
prob[1] = pred[0][ex_label[1]]
prob = safelog(prob) # compute on log scale
ex_label_pmask = ex_label[2:] != ex_label[:-2]
for t in range(1, len_pred):
# enter loop: prob[i] = log(p(pred[:t+1], label[:i+1]))
new_prob = prob.copy()
new_prob[1:] = log_sum_exp(new_prob[1:], prob[:-1])
new_prob[2:] = (
new_prob[2:] * (1 - ex_label_pmask)
+ log_sum_exp(new_prob[2:], prob[:-2]) * ex_label_pmask
)
new_prob += safelog(pred[t, ex_label])
prob = new_prob
return -log_sum_exp(prob[-1], prob[-2])
def test_ctc_loss():
def test_func(T, C, N):
input = np.random.randn(T, N, C)
input = F.softmax( | tensor(input) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = | tensor(dtype=np.float32) | megengine.core.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), | tensor(dtype=np.int32) | megengine.core.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = | load(fout) | megengine.load |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = optimizer.SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = optimizer.SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt1.step()
for param in mlp.parameters():
orig_param = orig_params[param]
slots[param] = slots[param] * 0.9 + param.grad.numpy()
assertTensorClose(param.numpy(), orig_param - 0.01 * slots[param])
def _test_optimizer(opt_str, test_case, check_class, update_lr=False):
iter_num = 3
data, data_shape, label, label_shape = get_input()
net = MLP()
opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
check_func = check_class(net, **test_case)
step = 0
# eager graph
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
ori_params = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = optimizer.SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt1.step()
for param in mlp.parameters():
orig_param = orig_params[param]
slots[param] = slots[param] * 0.9 + param.grad.numpy()
assertTensorClose(param.numpy(), orig_param - 0.01 * slots[param])
def _test_optimizer(opt_str, test_case, check_class, update_lr=False):
iter_num = 3
data, data_shape, label, label_shape = get_input()
net = MLP()
opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
check_func = check_class(net, **test_case)
step = 0
# eager graph
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
# static graph
@trace
def train_func(data, label):
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
opt.zero_grad()
ori_params = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = optimizer.SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt1.step()
for param in mlp.parameters():
orig_param = orig_params[param]
slots[param] = slots[param] * 0.9 + param.grad.numpy()
assertTensorClose(param.numpy(), orig_param - 0.01 * slots[param])
def _test_optimizer(opt_str, test_case, check_class, update_lr=False):
iter_num = 3
data, data_shape, label, label_shape = get_input()
net = MLP()
opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
check_func = check_class(net, **test_case)
step = 0
# eager graph
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
# static graph
@trace
def train_func(data, label):
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
opt.zero_grad()
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
train_func(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
def test_sgd():
class CheckValue:
def __init__(self, net, **kwarg):
self.slots = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = optimizer.SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt1.step()
for param in mlp.parameters():
orig_param = orig_params[param]
slots[param] = slots[param] * 0.9 + param.grad.numpy()
assertTensorClose(param.numpy(), orig_param - 0.01 * slots[param])
def _test_optimizer(opt_str, test_case, check_class, update_lr=False):
iter_num = 3
data, data_shape, label, label_shape = get_input()
net = MLP()
opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
check_func = check_class(net, **test_case)
step = 0
# eager graph
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
# static graph
@trace
def train_func(data, label):
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
opt.zero_grad()
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
train_func(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
def test_sgd():
class CheckValue:
def __init__(self, net, **kwarg):
self.slots = TensorDict()
for param in net.parameters():
self.slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
if hasattr(self, "momentum"):
self.slots[param] = grad + self.slots[param] * self.momentum
delta = -self.lr * self.slots[param]
else:
delta = -self.lr * grad
assertTensorClose(param.numpy(), ori_params[param] + delta)
cases = [
{"momentum": 0.9, "lr": 0.01}, # SGD with momentum
{"lr": 0.01}, # simple SGD
{"weight_decay": 0.1, "lr": 0.01}, # with weight_decay
]
for case in cases:
_test_optimizer("SGD", case, CheckValue)
_test_optimizer("SGD", case, CheckValue, update_lr=True)
def test_adam():
class CheckValue:
def __init__(self, net, **kwarg):
self.m_slots = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = optimizer.SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt1.step()
for param in mlp.parameters():
orig_param = orig_params[param]
slots[param] = slots[param] * 0.9 + param.grad.numpy()
assertTensorClose(param.numpy(), orig_param - 0.01 * slots[param])
def _test_optimizer(opt_str, test_case, check_class, update_lr=False):
iter_num = 3
data, data_shape, label, label_shape = get_input()
net = MLP()
opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
check_func = check_class(net, **test_case)
step = 0
# eager graph
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
# static graph
@trace
def train_func(data, label):
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
opt.zero_grad()
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
train_func(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
def test_sgd():
class CheckValue:
def __init__(self, net, **kwarg):
self.slots = TensorDict()
for param in net.parameters():
self.slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
if hasattr(self, "momentum"):
self.slots[param] = grad + self.slots[param] * self.momentum
delta = -self.lr * self.slots[param]
else:
delta = -self.lr * grad
assertTensorClose(param.numpy(), ori_params[param] + delta)
cases = [
{"momentum": 0.9, "lr": 0.01}, # SGD with momentum
{"lr": 0.01}, # simple SGD
{"weight_decay": 0.1, "lr": 0.01}, # with weight_decay
]
for case in cases:
_test_optimizer("SGD", case, CheckValue)
_test_optimizer("SGD", case, CheckValue, update_lr=True)
def test_adam():
class CheckValue:
def __init__(self, net, **kwarg):
self.m_slots = TensorDict()
self.v_slots = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = optimizer.SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt1.step()
for param in mlp.parameters():
orig_param = orig_params[param]
slots[param] = slots[param] * 0.9 + param.grad.numpy()
assertTensorClose(param.numpy(), orig_param - 0.01 * slots[param])
def _test_optimizer(opt_str, test_case, check_class, update_lr=False):
iter_num = 3
data, data_shape, label, label_shape = get_input()
net = MLP()
opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
check_func = check_class(net, **test_case)
step = 0
# eager graph
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
# static graph
@trace
def train_func(data, label):
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
opt.zero_grad()
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
train_func(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
def test_sgd():
class CheckValue:
def __init__(self, net, **kwarg):
self.slots = TensorDict()
for param in net.parameters():
self.slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
if hasattr(self, "momentum"):
self.slots[param] = grad + self.slots[param] * self.momentum
delta = -self.lr * self.slots[param]
else:
delta = -self.lr * grad
assertTensorClose(param.numpy(), ori_params[param] + delta)
cases = [
{"momentum": 0.9, "lr": 0.01}, # SGD with momentum
{"lr": 0.01}, # simple SGD
{"weight_decay": 0.1, "lr": 0.01}, # with weight_decay
]
for case in cases:
_test_optimizer("SGD", case, CheckValue)
_test_optimizer("SGD", case, CheckValue, update_lr=True)
def test_adam():
class CheckValue:
def __init__(self, net, **kwarg):
self.m_slots = TensorDict()
self.v_slots = TensorDict()
for param in net.parameters():
self.m_slots[param] = np.zeros(param.shape).astype(np.float32)
self.v_slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
m = self.m_slots[param]
v = self.v_slots[param]
m *= self.betas[0]
m += (1 - self.betas[0]) * grad
v *= self.betas[1]
v += (1 - self.betas[1]) * grad * grad
delta = (m / (1 - self.betas[0] ** step)) / (
np.sqrt(v / (1 - self.betas[1] ** step)) + self.eps
)
assertTensorClose(param.numpy(), ori_params[param] - self.lr * delta)
cases = [
{"betas": (0.8, 0.9), "eps": 1e-04, "lr": 0.01},
{
"betas": (0.8, 0.9),
"eps": 1e-04,
"lr": 0.01,
"weight_decay": 0.1,
}, # with weight_decay
]
for case in cases:
_test_optimizer("Adam", case, CheckValue)
_test_optimizer("Adam", case, CheckValue, update_lr=True)
def test_adagrad():
class CheckValue:
def __init__(self, net, **kwarg):
self.s_slots = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = optimizer.SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt1.step()
for param in mlp.parameters():
orig_param = orig_params[param]
slots[param] = slots[param] * 0.9 + param.grad.numpy()
assertTensorClose(param.numpy(), orig_param - 0.01 * slots[param])
def _test_optimizer(opt_str, test_case, check_class, update_lr=False):
iter_num = 3
data, data_shape, label, label_shape = get_input()
net = MLP()
opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
check_func = check_class(net, **test_case)
step = 0
# eager graph
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
# static graph
@trace
def train_func(data, label):
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
opt.zero_grad()
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
train_func(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
def test_sgd():
class CheckValue:
def __init__(self, net, **kwarg):
self.slots = TensorDict()
for param in net.parameters():
self.slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
if hasattr(self, "momentum"):
self.slots[param] = grad + self.slots[param] * self.momentum
delta = -self.lr * self.slots[param]
else:
delta = -self.lr * grad
assertTensorClose(param.numpy(), ori_params[param] + delta)
cases = [
{"momentum": 0.9, "lr": 0.01}, # SGD with momentum
{"lr": 0.01}, # simple SGD
{"weight_decay": 0.1, "lr": 0.01}, # with weight_decay
]
for case in cases:
_test_optimizer("SGD", case, CheckValue)
_test_optimizer("SGD", case, CheckValue, update_lr=True)
def test_adam():
class CheckValue:
def __init__(self, net, **kwarg):
self.m_slots = TensorDict()
self.v_slots = TensorDict()
for param in net.parameters():
self.m_slots[param] = np.zeros(param.shape).astype(np.float32)
self.v_slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
m = self.m_slots[param]
v = self.v_slots[param]
m *= self.betas[0]
m += (1 - self.betas[0]) * grad
v *= self.betas[1]
v += (1 - self.betas[1]) * grad * grad
delta = (m / (1 - self.betas[0] ** step)) / (
np.sqrt(v / (1 - self.betas[1] ** step)) + self.eps
)
assertTensorClose(param.numpy(), ori_params[param] - self.lr * delta)
cases = [
{"betas": (0.8, 0.9), "eps": 1e-04, "lr": 0.01},
{
"betas": (0.8, 0.9),
"eps": 1e-04,
"lr": 0.01,
"weight_decay": 0.1,
}, # with weight_decay
]
for case in cases:
_test_optimizer("Adam", case, CheckValue)
_test_optimizer("Adam", case, CheckValue, update_lr=True)
def test_adagrad():
class CheckValue:
def __init__(self, net, **kwarg):
self.s_slots = TensorDict()
for param in net.parameters():
self.s_slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
self.s_slots[param] += grad ** 2
delta = grad / (self.s_slots[param] + self.eps) ** 0.5
delta *= -(self.lr / (1 + (step - 1) * self.lr_decay))
assertTensorClose(param.numpy(), ori_params[param] + delta)
cases = [
{"lr": 0.01, "eps": 1e-06, "lr_decay": 0.01},
{"lr": 0.01, "eps": 1e-06, "lr_decay": 0.0}, # without lr_decay
{
"lr": 0.01,
"eps": 1e-06,
"lr_decay": 0.01,
"weight_decay": 0.1,
}, # with weight_decay
]
for case in cases:
_test_optimizer("Adagrad", case, CheckValue)
_test_optimizer("Adagrad", case, CheckValue, update_lr=True)
def test_adadelta():
class CheckValue:
def __init__(self, net, **kwarg):
self.s_slots = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, optimizer, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def get_input():
batch_size, input_dim = 2, 28
data_shape, label_shape = (batch_size, input_dim), (batch_size,)
data, label = tensor(dtype=np.float32), tensor(dtype=np.int32)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
return data, data_shape, label, label_shape
@graph_mode("eager", "static")
def test_optimizer_serialization():
data, data_shape, label, label_shape = get_input()
mlp = MLP()
opt = optimizer.SGD(mlp.parameters(), lr=0.01, momentum=0.9)
slots = TensorDict()
for param in mlp.parameters():
slots[param] = np.zeros(param.shape).astype(np.float32)
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
opt.step()
for param in mlp.parameters():
slots[param] = slots[param] * 0.9 + param.grad.numpy()
with BytesIO() as fout:
save(opt.state_dict(), fout)
fout.seek(0)
state_dict = load(fout)
opt1 = optimizer.SGD(mlp.parameters(), lr=0.02, momentum=0.8)
opt1.load_state_dict(state_dict)
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = mlp(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt1.zero_grad()
opt1.backward(loss)
orig_params = TensorDict()
for param in mlp.parameters():
orig_params[param] = np.copy(param.numpy())
opt1.step()
for param in mlp.parameters():
orig_param = orig_params[param]
slots[param] = slots[param] * 0.9 + param.grad.numpy()
assertTensorClose(param.numpy(), orig_param - 0.01 * slots[param])
def _test_optimizer(opt_str, test_case, check_class, update_lr=False):
iter_num = 3
data, data_shape, label, label_shape = get_input()
net = MLP()
opt = getattr(optimizer, opt_str)(net.parameters(), **test_case)
check_func = check_class(net, **test_case)
step = 0
# eager graph
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
data.set_value(np.random.random(data_shape).astype(np.float32))
label.set_value(np.random.randint(0, 10, label_shape))
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.zero_grad()
opt.backward(loss)
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
# static graph
@trace
def train_func(data, label):
pred = net(data)
loss = F.square_loss(pred, label.reshape(-1, 1))
opt.backward(loss)
for i in range(iter_num):
if update_lr and i == 1: # change learning rate
for group in opt.param_groups:
group["lr"] += 0.01
check_func.lr += 0.01
opt.zero_grad()
ori_params = TensorDict()
for param in net.parameters():
ori_params[param] = np.copy(param.numpy())
train_func(
np.random.random(data_shape).astype(np.float32),
np.random.randint(0, 10, label_shape).astype(np.int32),
)
opt.step()
step += 1
check_func(ori_params, net.parameters(), step)
def test_sgd():
class CheckValue:
def __init__(self, net, **kwarg):
self.slots = TensorDict()
for param in net.parameters():
self.slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
if hasattr(self, "momentum"):
self.slots[param] = grad + self.slots[param] * self.momentum
delta = -self.lr * self.slots[param]
else:
delta = -self.lr * grad
assertTensorClose(param.numpy(), ori_params[param] + delta)
cases = [
{"momentum": 0.9, "lr": 0.01}, # SGD with momentum
{"lr": 0.01}, # simple SGD
{"weight_decay": 0.1, "lr": 0.01}, # with weight_decay
]
for case in cases:
_test_optimizer("SGD", case, CheckValue)
_test_optimizer("SGD", case, CheckValue, update_lr=True)
def test_adam():
class CheckValue:
def __init__(self, net, **kwarg):
self.m_slots = TensorDict()
self.v_slots = TensorDict()
for param in net.parameters():
self.m_slots[param] = np.zeros(param.shape).astype(np.float32)
self.v_slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
m = self.m_slots[param]
v = self.v_slots[param]
m *= self.betas[0]
m += (1 - self.betas[0]) * grad
v *= self.betas[1]
v += (1 - self.betas[1]) * grad * grad
delta = (m / (1 - self.betas[0] ** step)) / (
np.sqrt(v / (1 - self.betas[1] ** step)) + self.eps
)
assertTensorClose(param.numpy(), ori_params[param] - self.lr * delta)
cases = [
{"betas": (0.8, 0.9), "eps": 1e-04, "lr": 0.01},
{
"betas": (0.8, 0.9),
"eps": 1e-04,
"lr": 0.01,
"weight_decay": 0.1,
}, # with weight_decay
]
for case in cases:
_test_optimizer("Adam", case, CheckValue)
_test_optimizer("Adam", case, CheckValue, update_lr=True)
def test_adagrad():
class CheckValue:
def __init__(self, net, **kwarg):
self.s_slots = TensorDict()
for param in net.parameters():
self.s_slots[param] = np.zeros(param.shape).astype(np.float32)
for k, v in kwarg.items():
setattr(self, k, v)
def __call__(self, ori_params, new_params, step):
for param in new_params:
grad = param.grad.numpy()
self.s_slots[param] += grad ** 2
delta = grad / (self.s_slots[param] + self.eps) ** 0.5
delta *= -(self.lr / (1 + (step - 1) * self.lr_decay))
assertTensorClose(param.numpy(), ori_params[param] + delta)
cases = [
{"lr": 0.01, "eps": 1e-06, "lr_decay": 0.01},
{"lr": 0.01, "eps": 1e-06, "lr_decay": 0.0}, # without lr_decay
{
"lr": 0.01,
"eps": 1e-06,
"lr_decay": 0.01,
"weight_decay": 0.1,
}, # with weight_decay
]
for case in cases:
_test_optimizer("Adagrad", case, CheckValue)
_test_optimizer("Adagrad", case, CheckValue, update_lr=True)
def test_adadelta():
class CheckValue:
def __init__(self, net, **kwarg):
self.s_slots = TensorDict()
self.a_slots = | TensorDict() | megengine.core.TensorDict |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
import json
import os
import pytest
from megengine import Parameter, tensor
from megengine.core import option
from megengine.module import Module
from megengine.utils.profiler import Profiler, scope
class Simple(Module):
def __init__(self):
super().__init__()
self.a = | Parameter([1.23], dtype="float32") | megengine.Parameter |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
import json
import os
import pytest
from megengine import Parameter, tensor
from megengine.core import option
from megengine.module import Module
from megengine.utils.profiler import Profiler, scope
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.23], dtype="float32")
def forward(self, x):
x = x * self.a
return x
def test_profiler():
profile_prefix = "pytest_profile"
profile_format = "chrome_timeline.json"
profile_path = "{}.{}".format(profile_prefix, profile_format)
with | Profiler(profile_prefix, format=profile_format) | megengine.utils.profiler.Profiler |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
import json
import os
import pytest
from megengine import Parameter, tensor
from megengine.core import option
from megengine.module import Module
from megengine.utils.profiler import Profiler, scope
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.23], dtype="float32")
def forward(self, x):
x = x * self.a
return x
def test_profiler():
profile_prefix = "pytest_profile"
profile_format = "chrome_timeline.json"
profile_path = "{}.{}".format(profile_prefix, profile_format)
with Profiler(profile_prefix, format=profile_format):
with | scope("my_scope") | megengine.utils.profiler.scope |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
import json
import os
import pytest
from megengine import Parameter, tensor
from megengine.core import option
from megengine.module import Module
from megengine.utils.profiler import Profiler, scope
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.23], dtype="float32")
def forward(self, x):
x = x * self.a
return x
def test_profiler():
profile_prefix = "pytest_profile"
profile_format = "chrome_timeline.json"
profile_path = "{}.{}".format(profile_prefix, profile_format)
with Profiler(profile_prefix, format=profile_format):
with scope("my_scope"):
oup = Simple()( | tensor([1.23], dtype="float32") | megengine.tensor |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import getopt
import os
import runpy
import sys
from megengine.logger import get_logger
from megengine.utils.profiler import Profiler, merge_trace_events
def main():
parser = argparse.ArgumentParser(
prog="megengine.tools.profiler", description="Profiling megengine program"
)
parser.add_argument(
"-m", "--module", action="store_true", help="whether launch program as module"
)
parser.add_argument("-o", "--output", type=str, help="output file location")
parser.add_argument(
"-f",
"--format",
action="append",
type=str,
help="output file format",
choices=Profiler.valid_formats,
)
parser.add_argument(
"--merge_trace_events", action="store_true",
)
parser.add_argument(
"--clean", action="store_true",
)
for opt in Profiler.valid_options:
parser.add_argument("--" + opt, type=int, default=None)
args, extras = parser.parse_known_args(sys.argv[1:])
prof_args = {}
for opt in Profiler.valid_options:
optval = getattr(args, opt, None)
if optval is not None:
prof_args[opt] = optval
if args.output is not None:
prof_args["path"] = args.output
if args.format:
prof_args["formats"] = args.format
if args.clean:
for file in os.listdir(profiler.directory):
os.remove(os.path.join(profiler.directory, file))
if len(extras) == 0:
if not args.merge_trace_events:
parser.print_usage()
exit(1)
else:
filename = extras[0]
if not args.module:
if not os.path.exists(filename):
get_logger().fatal("cannot find file {}".format(filename))
exit(1)
filename = os.path.realpath(filename)
# Replace profiler's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(filename)
sys.argv[:] = [filename, *extras[1:]]
profiler = | Profiler(**prof_args) | megengine.utils.profiler.Profiler |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import getopt
import os
import runpy
import sys
from megengine.logger import get_logger
from megengine.utils.profiler import Profiler, merge_trace_events
def main():
parser = argparse.ArgumentParser(
prog="megengine.tools.profiler", description="Profiling megengine program"
)
parser.add_argument(
"-m", "--module", action="store_true", help="whether launch program as module"
)
parser.add_argument("-o", "--output", type=str, help="output file location")
parser.add_argument(
"-f",
"--format",
action="append",
type=str,
help="output file format",
choices=Profiler.valid_formats,
)
parser.add_argument(
"--merge_trace_events", action="store_true",
)
parser.add_argument(
"--clean", action="store_true",
)
for opt in Profiler.valid_options:
parser.add_argument("--" + opt, type=int, default=None)
args, extras = parser.parse_known_args(sys.argv[1:])
prof_args = {}
for opt in Profiler.valid_options:
optval = getattr(args, opt, None)
if optval is not None:
prof_args[opt] = optval
if args.output is not None:
prof_args["path"] = args.output
if args.format:
prof_args["formats"] = args.format
if args.clean:
for file in os.listdir(profiler.directory):
os.remove(os.path.join(profiler.directory, file))
if len(extras) == 0:
if not args.merge_trace_events:
parser.print_usage()
exit(1)
else:
filename = extras[0]
if not args.module:
if not os.path.exists(filename):
get_logger().fatal("cannot find file {}".format(filename))
exit(1)
filename = os.path.realpath(filename)
# Replace profiler's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(filename)
sys.argv[:] = [filename, *extras[1:]]
profiler = Profiler(**prof_args)
with profiler:
if args.module:
runpy.run_module(filename)
else:
run_script(filename)
profiler.dump()
if args.merge_trace_events:
| merge_trace_events(profiler.directory) | megengine.utils.profiler.merge_trace_events |
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import getopt
import os
import runpy
import sys
from megengine.logger import get_logger
from megengine.utils.profiler import Profiler, merge_trace_events
def main():
parser = argparse.ArgumentParser(
prog="megengine.tools.profiler", description="Profiling megengine program"
)
parser.add_argument(
"-m", "--module", action="store_true", help="whether launch program as module"
)
parser.add_argument("-o", "--output", type=str, help="output file location")
parser.add_argument(
"-f",
"--format",
action="append",
type=str,
help="output file format",
choices=Profiler.valid_formats,
)
parser.add_argument(
"--merge_trace_events", action="store_true",
)
parser.add_argument(
"--clean", action="store_true",
)
for opt in Profiler.valid_options:
parser.add_argument("--" + opt, type=int, default=None)
args, extras = parser.parse_known_args(sys.argv[1:])
prof_args = {}
for opt in Profiler.valid_options:
optval = getattr(args, opt, None)
if optval is not None:
prof_args[opt] = optval
if args.output is not None:
prof_args["path"] = args.output
if args.format:
prof_args["formats"] = args.format
if args.clean:
for file in os.listdir(profiler.directory):
os.remove(os.path.join(profiler.directory, file))
if len(extras) == 0:
if not args.merge_trace_events:
parser.print_usage()
exit(1)
else:
filename = extras[0]
if not args.module:
if not os.path.exists(filename):
| get_logger() | megengine.logger.get_logger |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import cv2
import megengine as mge
import megengine.data.dataset as dataset
import megengine.jit as jit
import numpy as np
from megengine.utils.http_download import download_from_url
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
class Config:
NUM_CLASSES = 21
IMG_SIZE = 512
IMG_MEAN = [103.530, 116.280, 123.675]
IMG_STD = [57.375, 57.120, 58.395]
cfg = Config()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", type=str, default=None, help="inference image")
parser.add_argument("--model_path", type=str, default=None, help="inference model")
args = parser.parse_args()
net = load_model(args.model_path)
if args.image_path is None:
download_from_url("https://data.megengine.org.cn/images/cat.jpg", "test.jpg")
img = cv2.imread("test.jpg")
else:
img = cv2.imread(args.image_path)
pred = inference(img, net)
cv2.imwrite("out.jpg", pred)
def load_model(model_path):
model_dict = | mge.load(model_path) | megengine.load |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import cv2
import megengine as mge
import megengine.data.dataset as dataset
import megengine.jit as jit
import numpy as np
from megengine.utils.http_download import download_from_url
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
class Config:
NUM_CLASSES = 21
IMG_SIZE = 512
IMG_MEAN = [103.530, 116.280, 123.675]
IMG_STD = [57.375, 57.120, 58.395]
cfg = Config()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", type=str, default=None, help="inference image")
parser.add_argument("--model_path", type=str, default=None, help="inference model")
args = parser.parse_args()
net = load_model(args.model_path)
if args.image_path is None:
download_from_url("https://data.megengine.org.cn/images/cat.jpg", "test.jpg")
img = cv2.imread("test.jpg")
else:
img = cv2.imread(args.image_path)
pred = inference(img, net)
cv2.imwrite("out.jpg", pred)
def load_model(model_path):
model_dict = mge.load(model_path)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES)
net.load_state_dict(model_dict["state_dict"])
print("load model %s" % (model_path))
net.eval()
return net
def inference(img, net):
@ | jit.trace(symbolic=True, opt_level=2) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import cv2
import megengine as mge
import megengine.data.dataset as dataset
import megengine.jit as jit
import numpy as np
from megengine.utils.http_download import download_from_url
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
class Config:
NUM_CLASSES = 21
IMG_SIZE = 512
IMG_MEAN = [103.530, 116.280, 123.675]
IMG_STD = [57.375, 57.120, 58.395]
cfg = Config()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", type=str, default=None, help="inference image")
parser.add_argument("--model_path", type=str, default=None, help="inference model")
args = parser.parse_args()
net = load_model(args.model_path)
if args.image_path is None:
download_from_url("https://data.megengine.org.cn/images/cat.jpg", "test.jpg")
img = cv2.imread("test.jpg")
else:
img = cv2.imread(args.image_path)
pred = inference(img, net)
cv2.imwrite("out.jpg", pred)
def load_model(model_path):
model_dict = mge.load(model_path)
net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES)
net.load_state_dict(model_dict["state_dict"])
print("load model %s" % (model_path))
net.eval()
return net
def inference(img, net):
@jit.trace(symbolic=True, opt_level=2)
def pred_fun(data, net=None):
net.eval()
pred = net(data)
return pred
img = (img.astype("float32") - np.array(cfg.IMG_MEAN)) / np.array(cfg.IMG_STD)
orih, oriw = img.shape[:2]
img = cv2.resize(img, (cfg.IMG_SIZE, cfg.IMG_SIZE))
img = img.transpose(2, 0, 1)[np.newaxis]
data = | mge.tensor() | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import cv2
import megengine as mge
import megengine.data.dataset as dataset
import megengine.jit as jit
import numpy as np
from megengine.utils.http_download import download_from_url
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
class Config:
NUM_CLASSES = 21
IMG_SIZE = 512
IMG_MEAN = [103.530, 116.280, 123.675]
IMG_STD = [57.375, 57.120, 58.395]
cfg = Config()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", type=str, default=None, help="inference image")
parser.add_argument("--model_path", type=str, default=None, help="inference model")
args = parser.parse_args()
net = load_model(args.model_path)
if args.image_path is None:
| download_from_url("https://data.megengine.org.cn/images/cat.jpg", "test.jpg") | megengine.utils.http_download.download_from_url |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": ( | MM.Conv2d(32, 32, 3, 1, 0) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
| MM.Conv2d(32, 32, 3, 1, 0, groups=32) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": ( | MM.Conv3d(32, 32, 3, 1, 0) | megengine.module.Conv3d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
| MM.ConvTranspose2d(32, 32, 3, 1, 0) | megengine.module.ConvTranspose2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": ( | MM.BatchNorm2d(64) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": ( | MM.Linear(1000, 1000) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: MF.avg_pool2d(x, 2),
lambda x: TF.avg_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"broadcast",
lambda x: MF.broadcast_to(x, (5,) + x.shape),
lambda x: torch.broadcast_to(x, (5,) + x.shape),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"batchedmatmul",
MF.matmul,
torch.matmul,
[(8, 64, 32), (8, 32, 64)],
[(8, 2048, 512), (8, 512, 2048)],
True,
1000,
),
(
"batchnrom2d",
lambda x: module_cache["BatchNorm2d"][0](x),
lambda x: module_cache["BatchNorm2d"][1](x),
[(2, 64, 16, 16)],
[(64, 64, 128, 128)],
True,
1000,
),
(
"concat",
MF.concat,
torch.cat,
[(20, 100), (50, 100), (30, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16), (64, 512, 16, 16)],
False,
1000,
),
(
"conv2d",
lambda x: module_cache["conv2d"][0](x),
lambda x: module_cache["conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"conv3d",
lambda x: module_cache["conv3d"][0](x),
lambda x: module_cache["conv3d"][1](x),
[(2, 32, 8, 8, 8)],
[(32, 32, 16, 16, 16)],
True,
1000,
),
(
"convTranspose2d",
lambda x: module_cache["ConvTranspose2d"][0](x),
lambda x: module_cache["ConvTranspose2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"dropout",
lambda x: MF.dropout(x, 0.5),
TF.dropout,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"dw_conv2d",
lambda x: module_cache["dw_conv2d"][0](x),
lambda x: module_cache["dw_conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"elemwise.unary",
MF.log,
torch.log,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"elemwise.binary",
MF.add,
torch.add,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
True,
1000,
),
(
"expand_dims",
lambda x: MF.expand_dims(x, 0),
lambda x: torch.unsqueeze(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("gelu", MF.gelu, TF.gelu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("hswish", MF.hswish, TF.hardswish, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"hsigmoid",
MF.hsigmoid,
TF.hardsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("isinf", MF.isinf, torch.isinf, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"indeixngMultiAxisVec",
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
[(10, 10, 10, 10)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"logsigmoid",
MF.logsigmoid,
TF.logsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"leaky_relu",
lambda x: MF.leaky_relu(x, 0.5),
lambda x: TF.leaky_relu(x, 0.5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"linear",
lambda x: module_cache["Linear"][0](x),
lambda x: module_cache["Linear"][1](x),
[(10, 1000)],
[(64, 128, 1000)],
True,
1000,
),
("matinv", MF.matinv, torch.inverse, [(10, 10)], [(30, 30)], True, 1000),
(
"matmul",
MF.matmul,
torch.matmul,
[(64, 32), (32, 64)],
[(2048, 1024), (1024, 2048)],
True,
1000,
),
(
"max_pool2d",
lambda x: MF.max_pool2d(x, 2),
lambda x: TF.max_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"normal",
lambda x: mge.random.normal(0, 1, x.shape),
lambda x: torch.randn(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"prelu",
MF.prelu,
TF.prelu,
[(100, 100), (1,)],
[(64, 512, 16, 16), (1,)],
True,
1000,
),
(
"reduce.max",
lambda x: MF.max(x, 0),
lambda x: torch.max(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("relu", MF.relu, TF.relu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("relu6", MF.relu6, TF.relu6, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"repeat",
lambda x: MF.repeat(x, 5),
lambda x: torch.repeat_interleave(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("silu", MF.silu, TF.silu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"split",
lambda x: MF.split(x, 5),
lambda x: torch.split(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("sigmoid", MF.sigmoid, TF.sigmoid, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"softmax",
lambda x: MF.softmax(x, axis=1),
lambda x: TF.softmax(x, dim=1),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"softplus",
MF.softplus,
TF.softplus,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"squeeze",
lambda x: MF.squeeze(x, 0),
lambda x: torch.squeeze(x, 0),
[(1, 100, 100)],
[(1, 64, 512, 16, 16)],
True,
1000,
),
(
"stack",
MF.stack,
torch.stack,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
False,
10000,
),
(
"subtensor",
lambda x: x[0:20, 10:60],
lambda x: x[0:20, 10:60],
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"topk",
lambda x: MF.topk(x, 10),
lambda x: torch.topk(x, 10),
[(100, 100)],
[(1000, 1000)],
True,
1000,
),
(
"tile",
lambda x: MF.tile(x, (2,) * len(x.shape)),
lambda x: torch.tile(x, (2,) * len(x.shape)),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"transpose",
lambda x: MF.transpose(x, list(range(len(x.shape)))[::-1]),
lambda x: torch.permute(x, list(range(len(x.shape)))[::-1]),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"where",
lambda x: MF.where(x > 0.5, x, x),
lambda x: torch.where(x > 0.5, x, x),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"uniform",
lambda x: mge.random.uniform(0, 1, x.shape),
lambda x: torch.rand(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
]
def perf_func(func, inps, reps, unpack_inps, is_mge):
if is_mge:
| mge._full_sync() | megengine._full_sync |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: MF.avg_pool2d(x, 2),
lambda x: TF.avg_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"broadcast",
lambda x: MF.broadcast_to(x, (5,) + x.shape),
lambda x: torch.broadcast_to(x, (5,) + x.shape),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"batchedmatmul",
MF.matmul,
torch.matmul,
[(8, 64, 32), (8, 32, 64)],
[(8, 2048, 512), (8, 512, 2048)],
True,
1000,
),
(
"batchnrom2d",
lambda x: module_cache["BatchNorm2d"][0](x),
lambda x: module_cache["BatchNorm2d"][1](x),
[(2, 64, 16, 16)],
[(64, 64, 128, 128)],
True,
1000,
),
(
"concat",
MF.concat,
torch.cat,
[(20, 100), (50, 100), (30, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16), (64, 512, 16, 16)],
False,
1000,
),
(
"conv2d",
lambda x: module_cache["conv2d"][0](x),
lambda x: module_cache["conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"conv3d",
lambda x: module_cache["conv3d"][0](x),
lambda x: module_cache["conv3d"][1](x),
[(2, 32, 8, 8, 8)],
[(32, 32, 16, 16, 16)],
True,
1000,
),
(
"convTranspose2d",
lambda x: module_cache["ConvTranspose2d"][0](x),
lambda x: module_cache["ConvTranspose2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"dropout",
lambda x: MF.dropout(x, 0.5),
TF.dropout,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"dw_conv2d",
lambda x: module_cache["dw_conv2d"][0](x),
lambda x: module_cache["dw_conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"elemwise.unary",
MF.log,
torch.log,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"elemwise.binary",
MF.add,
torch.add,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
True,
1000,
),
(
"expand_dims",
lambda x: MF.expand_dims(x, 0),
lambda x: torch.unsqueeze(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("gelu", MF.gelu, TF.gelu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("hswish", MF.hswish, TF.hardswish, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"hsigmoid",
MF.hsigmoid,
TF.hardsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("isinf", MF.isinf, torch.isinf, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"indeixngMultiAxisVec",
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
[(10, 10, 10, 10)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"logsigmoid",
MF.logsigmoid,
TF.logsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"leaky_relu",
lambda x: MF.leaky_relu(x, 0.5),
lambda x: TF.leaky_relu(x, 0.5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"linear",
lambda x: module_cache["Linear"][0](x),
lambda x: module_cache["Linear"][1](x),
[(10, 1000)],
[(64, 128, 1000)],
True,
1000,
),
("matinv", MF.matinv, torch.inverse, [(10, 10)], [(30, 30)], True, 1000),
(
"matmul",
MF.matmul,
torch.matmul,
[(64, 32), (32, 64)],
[(2048, 1024), (1024, 2048)],
True,
1000,
),
(
"max_pool2d",
lambda x: MF.max_pool2d(x, 2),
lambda x: TF.max_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"normal",
lambda x: mge.random.normal(0, 1, x.shape),
lambda x: torch.randn(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"prelu",
MF.prelu,
TF.prelu,
[(100, 100), (1,)],
[(64, 512, 16, 16), (1,)],
True,
1000,
),
(
"reduce.max",
lambda x: MF.max(x, 0),
lambda x: torch.max(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("relu", MF.relu, TF.relu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("relu6", MF.relu6, TF.relu6, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"repeat",
lambda x: MF.repeat(x, 5),
lambda x: torch.repeat_interleave(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("silu", MF.silu, TF.silu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"split",
lambda x: MF.split(x, 5),
lambda x: torch.split(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("sigmoid", MF.sigmoid, TF.sigmoid, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"softmax",
lambda x: MF.softmax(x, axis=1),
lambda x: TF.softmax(x, dim=1),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"softplus",
MF.softplus,
TF.softplus,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"squeeze",
lambda x: MF.squeeze(x, 0),
lambda x: torch.squeeze(x, 0),
[(1, 100, 100)],
[(1, 64, 512, 16, 16)],
True,
1000,
),
(
"stack",
MF.stack,
torch.stack,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
False,
10000,
),
(
"subtensor",
lambda x: x[0:20, 10:60],
lambda x: x[0:20, 10:60],
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"topk",
lambda x: MF.topk(x, 10),
lambda x: torch.topk(x, 10),
[(100, 100)],
[(1000, 1000)],
True,
1000,
),
(
"tile",
lambda x: MF.tile(x, (2,) * len(x.shape)),
lambda x: torch.tile(x, (2,) * len(x.shape)),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"transpose",
lambda x: MF.transpose(x, list(range(len(x.shape)))[::-1]),
lambda x: torch.permute(x, list(range(len(x.shape)))[::-1]),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"where",
lambda x: MF.where(x > 0.5, x, x),
lambda x: torch.where(x > 0.5, x, x),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"uniform",
lambda x: mge.random.uniform(0, 1, x.shape),
lambda x: torch.rand(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
]
def perf_func(func, inps, reps, unpack_inps, is_mge):
if is_mge:
mge._full_sync()
tik = time.time()
for _ in range(reps):
if unpack_inps:
out = func(*inps)
else:
out = func(inps)
| mge._full_sync() | megengine._full_sync |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: MF.avg_pool2d(x, 2),
lambda x: TF.avg_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"broadcast",
lambda x: MF.broadcast_to(x, (5,) + x.shape),
lambda x: torch.broadcast_to(x, (5,) + x.shape),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"batchedmatmul",
MF.matmul,
torch.matmul,
[(8, 64, 32), (8, 32, 64)],
[(8, 2048, 512), (8, 512, 2048)],
True,
1000,
),
(
"batchnrom2d",
lambda x: module_cache["BatchNorm2d"][0](x),
lambda x: module_cache["BatchNorm2d"][1](x),
[(2, 64, 16, 16)],
[(64, 64, 128, 128)],
True,
1000,
),
(
"concat",
MF.concat,
torch.cat,
[(20, 100), (50, 100), (30, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16), (64, 512, 16, 16)],
False,
1000,
),
(
"conv2d",
lambda x: module_cache["conv2d"][0](x),
lambda x: module_cache["conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"conv3d",
lambda x: module_cache["conv3d"][0](x),
lambda x: module_cache["conv3d"][1](x),
[(2, 32, 8, 8, 8)],
[(32, 32, 16, 16, 16)],
True,
1000,
),
(
"convTranspose2d",
lambda x: module_cache["ConvTranspose2d"][0](x),
lambda x: module_cache["ConvTranspose2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"dropout",
lambda x: MF.dropout(x, 0.5),
TF.dropout,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"dw_conv2d",
lambda x: module_cache["dw_conv2d"][0](x),
lambda x: module_cache["dw_conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"elemwise.unary",
MF.log,
torch.log,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"elemwise.binary",
MF.add,
torch.add,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
True,
1000,
),
(
"expand_dims",
lambda x: MF.expand_dims(x, 0),
lambda x: torch.unsqueeze(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("gelu", MF.gelu, TF.gelu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("hswish", MF.hswish, TF.hardswish, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"hsigmoid",
MF.hsigmoid,
TF.hardsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("isinf", MF.isinf, torch.isinf, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"indeixngMultiAxisVec",
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
[(10, 10, 10, 10)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"logsigmoid",
MF.logsigmoid,
TF.logsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"leaky_relu",
lambda x: MF.leaky_relu(x, 0.5),
lambda x: TF.leaky_relu(x, 0.5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"linear",
lambda x: module_cache["Linear"][0](x),
lambda x: module_cache["Linear"][1](x),
[(10, 1000)],
[(64, 128, 1000)],
True,
1000,
),
("matinv", MF.matinv, torch.inverse, [(10, 10)], [(30, 30)], True, 1000),
(
"matmul",
MF.matmul,
torch.matmul,
[(64, 32), (32, 64)],
[(2048, 1024), (1024, 2048)],
True,
1000,
),
(
"max_pool2d",
lambda x: MF.max_pool2d(x, 2),
lambda x: TF.max_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"normal",
lambda x: mge.random.normal(0, 1, x.shape),
lambda x: torch.randn(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"prelu",
MF.prelu,
TF.prelu,
[(100, 100), (1,)],
[(64, 512, 16, 16), (1,)],
True,
1000,
),
(
"reduce.max",
lambda x: MF.max(x, 0),
lambda x: torch.max(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("relu", MF.relu, TF.relu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("relu6", MF.relu6, TF.relu6, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"repeat",
lambda x: MF.repeat(x, 5),
lambda x: torch.repeat_interleave(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("silu", MF.silu, TF.silu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"split",
lambda x: MF.split(x, 5),
lambda x: torch.split(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("sigmoid", MF.sigmoid, TF.sigmoid, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"softmax",
lambda x: MF.softmax(x, axis=1),
lambda x: TF.softmax(x, dim=1),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"softplus",
MF.softplus,
TF.softplus,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"squeeze",
lambda x: MF.squeeze(x, 0),
lambda x: torch.squeeze(x, 0),
[(1, 100, 100)],
[(1, 64, 512, 16, 16)],
True,
1000,
),
(
"stack",
MF.stack,
torch.stack,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
False,
10000,
),
(
"subtensor",
lambda x: x[0:20, 10:60],
lambda x: x[0:20, 10:60],
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"topk",
lambda x: MF.topk(x, 10),
lambda x: torch.topk(x, 10),
[(100, 100)],
[(1000, 1000)],
True,
1000,
),
(
"tile",
lambda x: MF.tile(x, (2,) * len(x.shape)),
lambda x: torch.tile(x, (2,) * len(x.shape)),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"transpose",
lambda x: MF.transpose(x, list(range(len(x.shape)))[::-1]),
lambda x: torch.permute(x, list(range(len(x.shape)))[::-1]),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"where",
lambda x: MF.where(x > 0.5, x, x),
lambda x: torch.where(x > 0.5, x, x),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"uniform",
lambda x: mge.random.uniform(0, 1, x.shape),
lambda x: torch.rand(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
]
def perf_func(func, inps, reps, unpack_inps, is_mge):
if is_mge:
mge._full_sync()
tik = time.time()
for _ in range(reps):
if unpack_inps:
out = func(*inps)
else:
out = func(inps)
mge._full_sync()
else:
torch.cuda.synchronize()
with torch.no_grad():
tik = time.time()
for _ in range(reps):
if unpack_inps:
out = func(*inps)
else:
out = func(inps)
torch.cuda.synchronize()
return time.time() - tik
def get_avg_time(func, inps, reps, unpack_inps, is_mge):
# warm up
for _ in range(2):
t = perf_func(func, inps, reps, unpack_inps, is_mge)
times = []
for _ in range(5):
t = perf_func(func, inps, reps, unpack_inps, is_mge)
times.append(t)
return np.mean(times)
def get_perf_results(mge_func, torch_func, shapes, unpack_inps, reps):
inps = [np.random.randn(*shape) for shape in shapes]
inps_mge = [ | mge.tensor(inp, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: | MF.adaptive_avg_pool2d(x, (7, 7)) | megengine.functional.adaptive_avg_pool2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: | MF.adaptive_max_pool2d(x, (7, 7)) | megengine.functional.adaptive_max_pool2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: | MF.avg_pool2d(x, 2) | megengine.functional.avg_pool2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: MF.avg_pool2d(x, 2),
lambda x: TF.avg_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"broadcast",
lambda x: | MF.broadcast_to(x, (5,) + x.shape) | megengine.functional.broadcast_to |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: MF.avg_pool2d(x, 2),
lambda x: TF.avg_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"broadcast",
lambda x: MF.broadcast_to(x, (5,) + x.shape),
lambda x: torch.broadcast_to(x, (5,) + x.shape),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"batchedmatmul",
MF.matmul,
torch.matmul,
[(8, 64, 32), (8, 32, 64)],
[(8, 2048, 512), (8, 512, 2048)],
True,
1000,
),
(
"batchnrom2d",
lambda x: module_cache["BatchNorm2d"][0](x),
lambda x: module_cache["BatchNorm2d"][1](x),
[(2, 64, 16, 16)],
[(64, 64, 128, 128)],
True,
1000,
),
(
"concat",
MF.concat,
torch.cat,
[(20, 100), (50, 100), (30, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16), (64, 512, 16, 16)],
False,
1000,
),
(
"conv2d",
lambda x: module_cache["conv2d"][0](x),
lambda x: module_cache["conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"conv3d",
lambda x: module_cache["conv3d"][0](x),
lambda x: module_cache["conv3d"][1](x),
[(2, 32, 8, 8, 8)],
[(32, 32, 16, 16, 16)],
True,
1000,
),
(
"convTranspose2d",
lambda x: module_cache["ConvTranspose2d"][0](x),
lambda x: module_cache["ConvTranspose2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"dropout",
lambda x: | MF.dropout(x, 0.5) | megengine.functional.dropout |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.