import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace

_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)


@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
    @subgraph_fn(
        "BatchNormNd",
        dtype=dtype,
        device=device,
        nr_inputs=4,
        interpret=interpret,
        gopt_level=gopt_level,
    )
    def batch_norm_nd(inputs, f, c):
        input, eps, weight, bias = inputs[0:4]
        # Reduce over every axis except the channel axis: shape (1, C, 1, ..., 1).
        reduce_shape = c(
            (1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
        )
        input_shape = f(GetVarShape(), input)
        input_elems = f(Reduce(mode="product", axis=0), input_shape)
        reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
        # Number of elements averaged per channel.
        reduce_size = f("//", input_elems, reduce_elems)
        reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
        channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
        channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
        channel_mean = f("/", channel_x1s, reduce_size)
        # Var[x] = E[x^2] - E[x]^2.
        channel_var = f(
            "-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
        )
        invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
        inv_var_wt = f("*", invsqrt_channel_var, weight)
        neg_channel_mean = f("-", channel_mean)
        # fma3(x, a, b) == x * a + b; nesting it applies the scale, then the shift.
        outvar = f(
            "fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
        )
        return (outvar,), (True,)

    return batch_norm_nd
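
# Reference only (ours, not part of the test): the subgraph above is plain
# per-channel batch normalization. A minimal NumPy sketch of the same
# arithmetic, assuming a float ndarray `x` with channels on axis 1:
def _batch_norm_reference(x, eps, weight, bias):
    axes = (0,) + tuple(range(2, x.ndim))  # all axes except the channel axis
    mean = x.mean(axis=axes, keepdims=True)
    var = (x ** 2).mean(axis=axes, keepdims=True) - mean ** 2
    scale = (var + eps) ** -0.5 * weight   # inv_var_wt above
    return x * scale + (-mean * scale + bias)  # the two nested fma3 calls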
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = trace(symbolic=symbolic)(subgraph_batch_norm)
primitive_batch_norm = | trace(symbolic=symbolic) | megengine.jit.trace |
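
    # Hypothetical continuation (ours; the excerpt ends at the trace wrappers).
    # Input shapes and the spatial size 4 are assumptions, not the original test's.
    inp = megengine.tensor(np.random.rand(batch_size, channels, 4, 4), dtype=dtype, device=device)
    weight = megengine.tensor(np.random.rand(1, channels, 1, 1), dtype=dtype, device=device)
    bias = megengine.tensor(np.random.rand(1, channels, 1, 1), dtype=dtype, device=device)
    eps = megengine.tensor(1e-5, dtype=dtype, device=device)
    diff = megengine.tensor(np.random.rand(batch_size, channels, 4, 4), dtype=dtype, device=device)
    out_s, grad_s = subgraph_batch_norm(inp, weight, bias, eps, diff)
    out_p, grad_p = primitive_batch_norm(inp, weight, bias, eps, diff)
    _assert_allclose(out_s.numpy(), out_p.numpy())
    _assert_allclose(grad_s.numpy(), grad_p.numpy())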

#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import random

import megengine as mge
import megengine.distributed as dist
import megengine.functional as F


class DataPrefetcher:
    """
    DataPrefetcher is inspired by the code of the following file:
    https://github.com/NVIDIA/apex/blob/master/examples/imagenet/main_amp.py
    It can speed up your PyTorch dataloader. For more information, please check
    https://github.com/NVIDIA/apex/issues/304#issuecomment-493562789.
    """

    def __init__(self, loader):
        self.loader = iter(loader)

    def preload(self):
        try:
            self.next_input, self.next_target, _, _ = next(self.loader)
        except StopIteration:
            self.next_input = None
            self.next_target = None
            return

    def next(self):
        inputs, target, _, _ = next(self.loader)
        return inputs.numpy(), target.numpy()


def random_resize(data_loader, exp, epoch, rank, is_distributed):
    tensor = mge.tensor([1])
    if rank == 0:
        # Train at the fixed target size for the last 10 epochs; otherwise
        # pick a random multiple of 32 from exp.random_size.
        if epoch > exp.max_epoch - 10:
            size = exp.input_size
        else:
            size = random.randint(*exp.random_size)
            size = int(32 * size)
        tensor *= size
    if is_distributed:
        # Rank 0 decides; the broadcast makes every rank agree on the same size.
        tensor = F.distributed.broadcast(tensor)
        dist.group_barrier()
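
# Minimal sketch (ours, not from this file) of the synchronization pattern
# used above: rank 0 picks a value, the broadcast makes every rank agree, and
# the barrier keeps ranks in step before the value is consumed.
def _pick_synced_size(rank, is_distributed, lo=10, hi=19):
    tensor = mge.tensor([1])
    if rank == 0:
        tensor *= int(32 * random.randint(lo, hi))
    if is_distributed:
        tensor = F.distributed.broadcast(tensor)
        dist.group_barrier()
    return int(tensor.item())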

import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb


def softmax_loss(pred, label, ignore_label=-1):
    # Subtract the per-row max for numerical stability before the log-sum-exp.
    max_pred = pred.max(axis=1, keepdims=True).detach()
    pred -= max_pred
    log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
    mask = 1 - F.equal(label, ignore_label)
    vlabel = label * mask.astype(np.float32)
    loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
    loss = loss.sum() / F.maximum(mask.sum(), 1)
    return loss


def softmax_loss_opr(pred, label, ignore_label=-1):
    # Same as softmax_loss, but returns the per-element losses unreduced.
    max_pred = pred.max(axis=1, keepdims=True).detach()
    pred -= max_pred
    log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
    mask = 1 - F.equal(label, ignore_label)
    vlabel = label * mask.astype(np.float32)
    loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
    return loss


def _smooth_l1_base(pred, gt, sigma):
    # Quadratic for |x| < 1 / sigma^2, linear outside.
    sigma2 = sigma ** 2
    cond_point = 1 / sigma2
    x = pred - gt
    abs_x = F.abs(x)
    in_mask = abs_x < cond_point
    out_mask = 1 - in_mask.astype(np.float32)
    in_value = 0.5 * (sigma * x) ** 2
    out_value = abs_x - 0.5 / sigma2
    value = in_value * in_mask.astype(np.float32) + out_value * out_mask
    return value


def _get_mask_of_label(label, background, ignore_label):
    mask_fg = 1 - F.equal(label, background).astype(np.float32)
    mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
    mask = mask_fg * mask_ig
    return mask, mask_ig


def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
                       background=0, ignore_label=-1, axis=1):
    value = _smooth_l1_base(pred, gt, sigma)
    mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
    loss = (value.sum(axis=axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
    return loss


def smooth_l1_loss_rcnn_opr(
        pred, gt, label, sigma=1, background=0, ignore_label=-1):
    """
    pred : (minibatch, class_num, 4)
    gt : (minibatch, 4)
    label : (minibatch, )
    """
    broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
    broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
        broadcast_label, background, ignore_label)
    vlabel = broadcast_label * broadcast_mask
    pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
    value = _smooth_l1_base(pred_corr, gt, sigma)
    loss = (value * broadcast_mask).sum(axis=1)
    return loss


def smooth_l1_loss(pred, target, beta: float):
    abs_x = F.abs(pred - target)
    in_mask = abs_x < beta
    out_mask = 1 - in_mask.astype(np.float32)
    in_loss = 0.5 * abs_x ** 2 / beta
    out_loss = abs_x - 0.5 * beta
    loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
    return loss.sum(axis=1)
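
# Consistency check (ours): the sigma- and beta-parameterized smooth-L1 forms
# above are the same elementwise function when beta == 1 / sigma**2:
#   0.5 * (sigma * x)**2 == 0.5 * x**2 / beta   for |x| <  beta
#   |x| - 0.5 / sigma**2 == |x| - 0.5 * beta    for |x| >= beta
def _check_smooth_l1_forms(sigma=2.0):
    x = np.random.RandomState(0).randn(8, 4).astype(np.float32)
    got = _smooth_l1_base(Tensor(x), Tensor(np.zeros_like(x)), sigma).numpy()
    beta = 1.0 / sigma ** 2
    want = np.where(np.abs(x) < beta, 0.5 * x ** 2 / beta, np.abs(x) - 0.5 * beta)
    np.testing.assert_allclose(got, want, rtol=1e-5, atol=1e-6)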

def sigmoid_cross_entropy_retina(
        pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
    device = pred.device
    mask = 1 - F.equal(label, ignore_label).astype(np.float32)
    vlabel = label * mask
    n, m, c = pred.shape
    # One-hot encode labels with an extra leading "background" slot, then drop it.
    zero_mat = F.zeros([n, m, c + 1]).to(device)
    index = F.expand_dims(vlabel, 2).astype(np.int32)
    one_hot = F.scatter(zero_mat, 2, index, F.ones([n, m, 1]))
    onehot = one_hot[:, :, 1:]
    pos_part = F.pow(1 - pred, gamma) * onehot * F.log(pred)
    neg_part = F.pow(pred, gamma) * (1 - onehot) * F.log(1 - pred)
    loss = -(alpha * pos_part + (1 - alpha) * neg_part).sum(axis=2) * mask
    positive_mask = (label > 0)
    return loss.sum() / F.maximum(positive_mask.sum(), 1)
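
# Note (ours): with gamma == 0 this reduces to alpha-weighted binary cross
# entropy; gamma > 0 adds the focal-loss modulation, which downweights
# well-classified examples. Quick scalar check of the positive-class term:
def _focal_pos_term(p, alpha=0.5, gamma=0.0):
    return -(alpha * (1 - p) ** gamma * np.log(p))

# _focal_pos_term(0.9, gamma=0) ~= 5.27e-2; _focal_pos_term(0.9, gamma=2) ~= 5.27e-4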

def smooth_l1_loss_retina(
        pred, gt, label, sigma=3, background=0, ignore_label=-1, axis=2):
    value = _smooth_l1_base(pred, gt, sigma)
    mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
    loss = (value.sum(axis=axis) * mask).sum() / F.maximum(mask.sum(), 1)
    return loss


def iou_l1_loss(pred, max_overlaps, gt, ignore_label=-1, background=0):
    pred = pred.reshape(pred.shape[0], -1, max_overlaps.shape[2])
    abs_x = F.abs(pred - max_overlaps)
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - | F.equal(label, ignore_label) | megengine.functional.equal |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - | F.equal(label, ignore_label) | megengine.functional.equal |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (1, pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(dim=1)
return loss
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis = axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - F.equal(label, ignore_label).astype(np.float32)
vlabel = label * mask
n, m, c = pred.shape
zero_mat = F.zeros([n, m, c + 1]).to(device)
index = F.expand_dims(vlabel, 2).astype(np.int32)
one_hot = F.scatter(zero_mat, 2, index, | F.ones([n, m, 1]) | megengine.functional.ones |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (1, pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(dim=1)
return loss
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis = axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - F.equal(label, ignore_label).astype(np.float32)
vlabel = label * mask
n, m, c = pred.shape
zero_mat = F.zeros([n, m, c + 1]).to(device)
index = F.expand_dims(vlabel, 2).astype(np.int32)
one_hot = F.scatter(zero_mat, 2, index, F.ones([n, m, 1]))
onehot = one_hot[:, :, 1:]
pos_part = F.pow(1 - pred, gamma) * onehot * | F.log(pred) | megengine.functional.log |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (1, pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(dim=1)
return loss
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis = axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - F.equal(label, ignore_label).astype(np.float32)
vlabel = label * mask
n, m, c = pred.shape
zero_mat = F.zeros([n, m, c + 1]).to(device)
index = F.expand_dims(vlabel, 2).astype(np.int32)
one_hot = F.scatter(zero_mat, 2, index, F.ones([n, m, 1]))
onehot = one_hot[:, :, 1:]
pos_part = F.pow(1 - pred, gamma) * onehot * F.log(pred)
neg_part = F.pow(pred, gamma) * (1 - onehot) * | F.log(1 - pred) | megengine.functional.log |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (1, pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(dim=1)
return loss
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis = axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - F.equal(label, ignore_label).astype(np.float32)
vlabel = label * mask
n, m, c = pred.shape
zero_mat = | F.zeros([n, m, c + 1]) | megengine.functional.zeros |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (1, pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(dim=1)
return loss
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis = axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - F.equal(label, ignore_label).astype(np.float32)
vlabel = label * mask
n, m, c = pred.shape
zero_mat = F.zeros([n, m, c + 1]).to(device)
index = | F.expand_dims(vlabel, 2) | megengine.functional.expand_dims |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (1, pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(dim=1)
return loss
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis = axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - F.equal(label, ignore_label).astype(np.float32)
vlabel = label * mask
n, m, c = pred.shape
zero_mat = F.zeros([n, m, c + 1]).to(device)
index = F.expand_dims(vlabel, 2).astype(np.int32)
one_hot = F.scatter(zero_mat, 2, index, F.ones([n, m, 1]))
onehot = one_hot[:, :, 1:]
pos_part = | F.pow(1 - pred, gamma) | megengine.functional.pow |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (1, pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(dim=1)
return loss
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis = axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma = 1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - F.equal(label, ignore_label).astype(np.float32)
vlabel = label * mask
n, m, c = pred.shape
zero_mat = F.zeros([n, m, c + 1]).to(device)
index = F.expand_dims(vlabel, 2).astype(np.int32)
one_hot = F.scatter(zero_mat, 2, index, F.ones([n, m, 1]))
onehot = one_hot[:, :, 1:]
pos_part = F.pow(1 - pred, gamma) * onehot * F.log(pred)
neg_part = | F.pow(pred, gamma) | megengine.functional.pow |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - | F.equal(label, background) | megengine.functional.equal |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - | F.equal(label, ignore_label) | megengine.functional.equal |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
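# Editor's sketch (hypothetical values): for label = [0, 1, -1] with background=0
# and ignore_label=-1, _get_mask_of_label returns mask = [0, 1, 0] (foreground and
# not ignored) and mask_ig = [1, 1, 0] (everything except ignored entries).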
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma=1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis=axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
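# Editor's note: smooth_l1_loss_rpn sums the per-coordinate smooth-L1 values over
# `axis` (the 4 box deltas), keeps only foreground anchors via `mask`, and
# normalizes by the number of non-ignored anchors (mask_ig.sum()), e.g. with
# hypothetical shapes pred/gt (N, 4) and label (N,).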
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma=1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
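# Editor's note on the version above: `label` (N,) is broadcast to (N, 4) so that
# F.nn.indexing_one_hot can gather, for every sample, the 4 deltas that `pred`
# (N, class_num, 4) predicts for that sample's class; background rows are first
# remapped to class 0 by `broadcast_mask` and then zeroed out of the summed loss.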
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - | F.equal(label, ignore_label) | megengine.functional.equal |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma=1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis=axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma=1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - F.equal(label, ignore_label).astype(np.float32)
vlabel = label * mask
n, m, c = pred.shape
zero_mat = F.zeros([n, m, c + 1]).to(device)
index = F.expand_dims(vlabel, 2).astype(np.int32)
one_hot = F.scatter(zero_mat, 2, index, F.ones([n, m, 1]))
onehot = one_hot[:, :, 1:]
pos_part = F.pow(1 - pred, gamma) * onehot * F.log(pred)
neg_part = F.pow(pred, gamma) * (1 - onehot) * F.log(1 - pred)
loss = -(alpha * pos_part + (1 - alpha) * neg_part).sum(axis=2) * mask
positive_mask = (label > 0)
return loss.sum() / F.maximum(positive_mask.sum(), 1)
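# Editor's note: sigmoid_cross_entropy_retina is focal loss on per-class sigmoid
# probabilities -- `pred` (n, m, c) must already lie in (0, 1), since F.log is
# applied directly. Labels are one-hot encoded with an extra background channel
# that is sliced off, ignored anchors are masked, and the result is normalized by
# the positive-anchor count. With gamma=0 and alpha=0.5 this reduces to 0.5 times
# the plain binary cross entropy.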
def smooth_l1_loss_retina(
pred, gt, label, sigma=3, background=0, ignore_label=-1, axis=2):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis=axis) * mask).sum() / F.maximum(mask.sum(), 1)
return loss
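# Editor's note: smooth_l1_loss_retina differs from the RPN variant above only in
# its defaults (sigma=3, axis=2 for anchors shaped (n, m, 4)) and in normalizing
# by the foreground count mask.sum() rather than the non-ignored count.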
def iou_l1_loss(pred, max_overlaps, gt, ignore_label=-1, background=0):
pred = pred.reshape(pred.shape[0], -1, max_overlaps.shape[2])
abs_x = F.abs(pred - max_overlaps)
mask_bg = 1 - | F.equal(gt, background) | megengine.functional.equal |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
return loss
def _smooth_l1_base(pred, gt, sigma):
sigma2 = sigma ** 2
cond_point = 1 / sigma2
x = pred - gt
abs_x = F.abs(x)
in_mask = abs_x < cond_point
out_mask = 1 - in_mask.astype(np.float32)
in_value = 0.5 * (sigma * x) ** 2
out_value = abs_x - 0.5 / sigma2
value = in_value * in_mask.astype(np.float32) + out_value * out_mask
return value
def _get_mask_of_label(label, background, ignore_label):
mask_fg = 1 - F.equal(label, background).astype(np.float32)
mask_ig = 1 - F.equal(label, ignore_label).astype(np.float32)
mask = mask_fg * mask_ig
return mask, mask_ig
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma=1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss_rpn(pred, gt, label, sigma=1,
background=0, ignore_label=-1, axis=1):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis=axis) * mask).sum() / F.maximum(mask_ig.sum(), 1)
return loss
def smooth_l1_loss_rcnn_opr(
pred, gt, label, sigma=1, background=0, ignore_label=-1):
"""
pred : (minibatch, class_num, 4)
gt : (minibatch, 4)
label : (minibatch, )
"""
broadcast_label = F.broadcast_to(label.reshape(-1, 1), (label.shape[0], pred.shape[-1]))
broadcast_mask, broadcast_mask_ig = _get_mask_of_label(
broadcast_label, background, ignore_label)
vlabel = broadcast_label * broadcast_mask
pred_corr = F.nn.indexing_one_hot(pred, vlabel.astype(np.int32), 1)
value = _smooth_l1_base(pred_corr, gt, sigma)
loss = (value * broadcast_mask).sum(axis=1)
return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = F.abs(pred - target)
in_mask = abs_x < beta
out_mask = 1 - in_mask.astype(np.float32)
in_loss = 0.5 * abs_x ** 2 / beta
out_loss = abs_x - 0.5 * beta
loss = in_loss * in_mask.astype(np.float32) + out_loss * out_mask
return loss.sum(axis=1)
def sigmoid_cross_entropy_retina(
pred, label, ignore_label=-1, background=0, alpha=0.5, gamma=0):
device = pred.device
mask = 1 - F.equal(label, ignore_label).astype(np.float32)
vlabel = label * mask
n, m, c = pred.shape
zero_mat = F.zeros([n, m, c + 1]).to(device)
index = F.expand_dims(vlabel, 2).astype(np.int32)
one_hot = F.scatter(zero_mat, 2, index, F.ones([n, m, 1]))
onehot = one_hot[:, :, 1:]
pos_part = F.pow(1 - pred, gamma) * onehot * F.log(pred)
neg_part = F.pow(pred, gamma) * (1 - onehot) * F.log(1 - pred)
loss = -(alpha * pos_part + (1 - alpha) * neg_part).sum(axis=2) * mask
positive_mask = (label > 0)
return loss.sum() / F.maximum(positive_mask.sum(), 1)
def smooth_l1_loss_retina(
pred, gt, label, sigma=3, background=0, ignore_label=-1, axis=2):
value = _smooth_l1_base(pred, gt, sigma)
mask, mask_ig = _get_mask_of_label(label, background, ignore_label)
loss = (value.sum(axis=axis) * mask).sum() / F.maximum(mask.sum(), 1)
return loss
def iou_l1_loss(pred, max_overlaps, gt, ignore_label=-1, background=0):
pred = pred.reshape(pred.shape[0], -1, max_overlaps.shape[2])
abs_x = F.abs(pred - max_overlaps)
mask_bg = 1 - F.equal(gt, background).astype(np.float32)
mask_ig = 1 - | F.equal(gt, ignore_label) | megengine.functional.equal |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log( | F.exp(pred) | megengine.functional.exp |
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def softmax_loss(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask.astype(np.float32)
loss = -(F.nn.indexing_one_hot(log_prob, vlabel.astype(np.int32), 1).flatten() * mask)
loss = loss.sum() / F.maximum(mask.sum(), 1)
return loss
def softmax_loss_opr(pred, label, ignore_label=-1):
max_pred = pred.max(axis=1, keepdims=True).detach()
pred -= max_pred
log_prob = pred - F.log( | F.exp(pred) | megengine.functional.exp |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@ | trace(symbolic=trace_mode) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = | tensor([1]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
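# Editor's sketch (not part of the original test file): `trace` compiles the
# decorated function on its first call and replays the cached graph afterwards:
#     @trace(symbolic=True)
#     def double(x):
#         return x * 2
#     double(tensor([1]))  # first call traces and compiles
#     double(tensor([3]))  # subsequent calls replay the compiled graph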
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@ | trace(symbolic=trace_mode) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = | tensor([1]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@ | trace(symbolic=trace_mode, opt_level=2) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = | optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4) | megengine.optimizer.SGD |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@ | trace(symbolic=trace_mode, opt_level=2) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@ | trace(symbolic=True, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
| AutoNaming.clear() | megengine.utils.naming.AutoNaming.clear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = | tensor([2]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = | tensor([4]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = | cgtools.GraphInference(file) | megengine.utils.comp_graph_tools.GraphInference |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = | tensor([2]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@ | trace(symbolic=True, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = | tensor([3]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = | cgtools.GraphInference(file) | megengine.utils.comp_graph_tools.GraphInference |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = | tensor([2]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@ | trace(symbolic=True, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = | tensor([3]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = | G.load_graph(file) | megengine.core.tensor.megbrain_graph.load_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@ | trace(symbolic=trace_mode, profiling=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = | tensor([1]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# prevent from remaining scope from exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@ | trace(symbolic=True, opt_level=0, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
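# Unit tests for megengine.jit.trace: eager vs. symbolic tracing, graph
# dump/serialization, and graph-optimization (gopt) behavior.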
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
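# A traced training step must produce identical losses in eager and symbolic
# modes, even when autodiff forces an extra copy of the output.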
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output during grad computation
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
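# Code under exclude_from_trace() runs as ordinary Python on every call, so
# the data-dependent sign flip below must be re-evaluated each iteration.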
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
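# --- editor's sketch (not part of the test suite) ---
# exclude_from_trace() drops back to ordinary eager execution, so Python
# control flow inside it (here, a closure flag, mirroring the `i % 2`
# branch above) is re-evaluated on every call instead of being frozen
# into the traced graph.
def _sketch_exclude():
    flag = [True]
    @trace(symbolic=True)
    def step(x):
        x = -x
        with exclude_from_trace():
            if flag[0]:  # plain Python branch, re-evaluated each call
                x = -x
        return -x
    for v in (True, False, True):
        flag[0] = v
        step(tensor([1]))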
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be the same as idx, since gopt elides the addition of zero
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt,
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@ | trace(symbolic=True, opt_level=1, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
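# --- editor's sketch (not part of the test suite) ---
# The pattern above, distilled: forward, backward and the optimizer update
# can all live inside a single traced function.
def _sketch_traced_train_step():
    w = Parameter([1.0], dtype=np.float32)
    gm = GradManager().attach(w)
    opt = optim.SGD([w], lr=0.1)
    @trace(symbolic=True)
    def step(x):
        with gm:
            loss = (x * w).mean()
            gm.backward(loss)
        opt.step().clear_grad()
        return loss
    for _ in range(3):
        step(tensor(np.ones(4), dtype="float32"))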
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be the same as idx, since gopt elides the addition of zero
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
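# --- editor's sketch (not part of the test suite) ---
# At opt_level=2 a chain of elemwise ops may be fused into a single kernel;
# fusion is transparent, so the result matches the unfused arithmetic.
def _sketch_elemwise_chain():
    @trace(symbolic=True, opt_level=2)
    def chain(x):
        return (x * 2.0 + 1.0) * 0.5
    x = tensor(np.ones((4,)), dtype=np.float32)
    np.testing.assert_allclose(chain(x).numpy(), (np.ones(4) * 2.0 + 1.0) * 0.5)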
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt,
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = | tensor(0.0) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be the same as idx, since gopt elides the addition of zero
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
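# --- editor's sketch (not part of the test suite) ---
# The reduction traced above is a per-row L2 norm; a quick numpy
# cross-check of the same arithmetic:
def _sketch_row_norm():
    w = tensor(np.arange(6, dtype="float32").reshape(2, 3))
    wm = F.sum(w ** 2, axis=1) ** 0.5
    np.testing.assert_allclose(wm.numpy(), np.linalg.norm(w.numpy(), axis=1), rtol=1e-6)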
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt,
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@ | trace(symbolic=True, opt_level=0, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be the same as idx, since gopt elides the addition of zero
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
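# --- editor's sketch (not part of the test suite) ---
# Mirroring the test above: with symbolic=False the function body executes
# eagerly, so an intermediate value can be copied out through a closure.
def _sketch_probe_intermediate():
    seen = []
    @trace(symbolic=False)
    def probe(x):
        y = -x
        seen.append(y.numpy())  # readable only because execution is eager
        return -y
    probe(tensor([2]))
    return seen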
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt,
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@ | trace(symbolic=True, opt_level=1, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be the same as idx, since gopt elides the addition of zero
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
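# --- editor's sketch (not part of the test suite) ---
# dump() also accepts a filesystem path (mkstemp is imported above for
# exactly this use); GraphInference is assumed to accept a path just as
# G.load_graph does.
def _sketch_dump_to_path():
    @trace(symbolic=True, capture_as_const=True)
    def add1(x):
        return x + 1.0
    add1(tensor([1.0]))  # trace once before dumping
    _, path = mkstemp()
    add1.dump(path, optimize_for_inference=False)
    return cgtools.GraphInference(path)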
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt,
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = | tensor(val) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be the same as idx, since gopt elides the addition of zero
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
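# --- editor's sketch (not part of the test suite) ---
# With capture_as_const=True the captured tensor is baked into the graph,
# so the dumped model should expose only the real argument as an input
# (the "arg_0" naming convention follows test_dump above).
def _sketch_captured_is_const():
    b = tensor([10])
    @trace(symbolic=True, capture_as_const=True)
    def scale(x):
        return x * b
    scale(tensor([1]))
    buf = io.BytesIO()
    info = scale.dump(buf)
    np.testing.assert_equal(info.inputs, ["arg_0"])  # b is not an input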
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt,
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = | tensor(0.0) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be the same as idx, since gopt elides the addition of zero
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
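# --- editor's sketch (not part of the test suite) ---
# Helper form of the assertion above: list the opr types feeding the
# single output var of a dumped graph.
def _sketch_producer_types(dumped_file):
    dumped_file.seek(0)
    cg, _, outputs = G.load_graph(dumped_file)
    (out,) = outputs
    ins = cgtools.get_owner_opr_inputs(out)
    return [cgtools.get_owner_opr_type(v) for v in ins]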
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt,
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
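# --- editor's sketch (not part of the test suite) ---
# Why opt_level=1 survives val = 1e4: gopt presumably applies the standard
# log-sum-exp rewrite
#   log(exp(x) + exp(y)) = max(x, y) + log(1 + exp(-|x - y|))
# so exp(1e4) (inf in float32) is never materialized. Numeric check:
def _sketch_stable_logsumexp():
    x, y = 1.0e4, 0.0
    stable = max(x, y) + np.log1p(np.exp(-abs(x - y)))
    assert np.isclose(stable, x)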
def test_goptions_log_exp():
@ | trace(symbolic=True, opt_level=0, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be the same as idx, since gopt elides the addition of zero
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
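# --- editor's sketch (not part of the test suite) ---
# get_profile() returns a dict-like result; assuming it is
# json-serializable, it can be written out for offline inspection.
def _sketch_save_profile(traced_fn, path):
    import json
    with open(path, "w") as fp:
        json.dump(traced_fn.get_profile(), fp)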
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt,
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@ | trace(symbolic=True, opt_level=1, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be the same as idx, since gopt elides the addition of zero
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x will not trigger gopt,
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
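# --- editor's sketch (not part of the test suite) ---
# What the two graphs compute for d = 0.0: opt_level=0 keeps y / y
# literally, giving 0/0 = nan, while opt_level=1 folds y / y to 1 first.
def _sketch_zero_over_zero():
    with np.errstate(invalid="ignore"):
        assert not np.isfinite(np.float32(0.0) / np.float32(0.0))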
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = | G.load_graph(out) | megengine.core.tensor.megbrain_graph.load_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
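# Sketch of the semantics relied on above: statements under
# exclude_from_trace() run eagerly on every call instead of being recorded
# into the graph, which is why the Python-level `if i % 2` branch can take a
# different path on each iteration. Illustrative only.
def _exclude_sketch(t, flip):
    with exclude_from_trace():
        if flip:  # ordinary Python control flow, re-evaluated each call
            t = -t
    return t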
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
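# GraphInference is fed positional tensors above; running keyed by the
# recorded input names ("arg_0"/"arg_1") should also work. `inp_dict` is an
# assumed keyword argument here -- a hedged sketch, not a verified API
# contract.
def _run_by_name(dumped_file, a_np, b_np):
    infer = cgtools.GraphInference(dumped_file)
    return infer.run(inp_dict={"arg_0": a_np, "arg_1": b_np})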
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = | cgtools.get_oprs_seq(outputs) | megengine.utils.comp_graph_tools.get_oprs_seq |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
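# The traced training step above, reduced to its skeleton (names are
# illustrative): forward and backward under GradManager, then an optimizer
# step, all inside a single traced function.
def _make_train_step(net, gm, opt):
    @trace(symbolic=True)
    def step(d):
        with gm:
            loss = net(d)
            gm.backward(loss)
        opt.step().clear_grad()
        return loss
    return step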
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
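# The nonlocal-buffer trick above only works with symbolic=False: x.numpy()
# needs a concrete value, which a symbolic trace cannot provide at trace
# time. A hedged sketch of the same side-channel pattern:
def _capture_intermediate(inp):
    seen = {}
    @trace(symbolic=False)
    def f(x):
        x = -x
        seen["mid"] = x.numpy()  # concrete read; valid in eager mode only
        return -x
    return f(inp), seen["mid"]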
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = | G.load_graph(out) | megengine.core.tensor.megbrain_graph.load_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
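# capture_as_const=True bakes enclosing tensors such as `a` above into the
# dump as constants, so the exported graph should need only the explicit
# argument. Hedged sketch of checking that via the dump info returned by
# f.dump() (the expected ["arg_0"] value is an assumption):
def _assert_only_explicit_inputs(dump_info):
    # captured constants should not show up among the runtime inputs
    assert list(dump_info.inputs) == ["arg_0"]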
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = | cgtools.get_oprs_seq(outputs) | megengine.utils.comp_graph_tools.get_oprs_seq |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
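# The two-opr difference asserted above matches log(exp(x)) collapsing away:
# the Exp and Log elemwise operators vanish from the optimized dump. Sketch
# of counting them by type name (`get_opr_type` and the "Elemwise" type
# string are assumptions):
def _count_elemwise(outputs):
    return sum(
        1
        for opr in cgtools.get_oprs_seq(outputs)
        if cgtools.get_opr_type(opr) == "Elemwise"
    )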
def test_optimize_for_inference():
@ | trace(symbolic=True, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
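# dump_info above exposes nr_opr, inputs and outputs; a small convenience
# wrapper for logging them (attribute names taken from the assertions above):
def _describe_dump(dump_info):
    return "{} oprs, inputs={}, outputs={}".format(
        dump_info.nr_opr, list(dump_info.inputs), list(dump_info.outputs)
    )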
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = | G.load_graph(out) | megengine.core.tensor.megbrain_graph.load_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
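# enable_io16xc32 stores graph I/O in float16 while computing in float32,
# which is why the first computing input above is float16. The attribute
# chain below is factored straight out of the assertion above:
def _first_computing_input_dtype(load_result):
    return load_result.output_vars_list[0].owner.inputs[0].dtype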
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@ | trace(capture_as_const=True, symbolic_shape=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
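# dump() accepts any writable file object, as the io.BytesIO() sink above
# shows. A tiny sketch of dumping into a reusable in-memory buffer
# (illustrative only):
def _dump_to_buffer(traced_fn):
    buf = io.BytesIO()
    traced_fn.dump(buf)
    buf.seek(0)  # rewind so the buffer can be read back immediately
    return buf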
def test_trace_cvt_bool():
x = | tensor([0], dtype=np.int32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@ | trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear naming scope that may remain from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x would not trigger gopt
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@ | trace(symbolic=trace_mode, capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear naming scope that may remain from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x would not trigger gopt
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = | tensor([5, 2, 7, 1, 0, 3, 2]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear naming scope that may remain from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x would not trigger gopt
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@ | trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear naming scope that may remain from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x would not trigger gopt
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@ | trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear naming scope that may remain from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x would not trigger gopt
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(3):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = | tensor([1, 2, 3, 4]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear naming scope that may remain from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x would not trigger gopt
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(3):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = | tensor([5, 6, 7, 8]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx will be idx as gopt will ignore the addition
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear naming scope that may remain from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly returning x / x would not trigger gopt
# since there's no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
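# annotation: the difference of exactly two oprs is presumably the EXP and
# LOG elemwise oprs that gopt removes when folding log(exp(x)) back to x.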
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
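# annotation: enable_io16xc32 stores graph inputs/outputs as float16 while
# keeping computation in float32 (io16, compute32); the assertion checks
# that the tensor feeding the output opr is indeed float16.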
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(3):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = | tensor([9, 0, 1, 2]) | megengine.tensor |
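# annotation (self-contained sketch, not part of the dataset rows): the
# minimal jit.trace pattern every prompt in this dump builds on -- decorate,
# call repeatedly so the compiled trace is replayed, read back via .numpy().
import numpy as np
from megengine import tensor
from megengine.jit import trace

@trace(symbolic=True)
def _annotated_neg(x):
    return -x

_x = tensor([1])
_expected = _annotated_neg(_x).numpy()
for _ in range(3):
    np.testing.assert_equal(_annotated_neg(_x).numpy(), _expected)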
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx is just idx, since gopt ignores the addition of the zero base
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from an exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(3):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = tensor([9, 0, 1, 2])
@trace
def add_abc(a, b, c):
ps = a + b
result = ps + c
if step_count == bad_step:
raise CatchMe("catch me")
return result
for i in range(100):
try:
d = add_abc(a, b, c)
except CatchMe as e:
catch_count += 1
else:
np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
step_count += 1
assert catch_count == 1
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_broadcast(trace_mode):
x1 = tensor(np.random.randn(3, 1, 1))
x2 = tensor(np.random.randn(1, 4, 1))
x3 = tensor(np.random.randn(1, 1, 5))
@ | trace(symbolic=trace_mode, capture_as_const=True) | megengine.jit.trace |
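# annotation (self-contained sketch): the capture_as_const / dump /
# GraphInference round trip that test_dump and test_capture_dump exercise,
# reduced to its core; helper names are local to this sketch.
import io
import numpy as np
from megengine import tensor
from megengine.jit import trace
import megengine.utils.comp_graph_tools as cgtools

@trace(symbolic=True, capture_as_const=True)
def _annotated_mul2(x):
    return x * 2.0

_y = _annotated_mul2(tensor([3.0])).numpy()
_buf = io.BytesIO()
_annotated_mul2.dump(_buf)
_buf.seek(0)
_result = list(cgtools.GraphInference(_buf).run(tensor([3.0])).values())[0]
np.testing.assert_equal(_result[0], _y)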
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx is just idx, since gopt ignores the addition of the zero base
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from an exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(3):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = tensor([9, 0, 1, 2])
@trace
def add_abc(a, b, c):
ps = a + b
result = ps + c
if step_count == bad_step:
raise CatchMe("catch me")
return result
for i in range(100):
try:
d = add_abc(a, b, c)
except CatchMe as e:
catch_count += 1
else:
np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
step_count += 1
assert catch_count == 1
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_broadcast(trace_mode):
x1 = tensor(np.random.randn(3, 1, 1))
x2 = tensor(np.random.randn(1, 4, 1))
x3 = tensor(np.random.randn(1, 1, 5))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = F.broadcast_to(x, (3, 4, 5))
return y
f(x1)
f(x2)
f(x3)
def test_trace_nms():
def make_inputs(n):
boxes = np.zeros((n, 4))
boxes[:, :2] = np.random.rand(n, 2) * 100
boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
scores = np.random.rand(n)
return tensor(boxes), tensor(scores)
@ | trace(symbolic=False) | megengine.jit.trace |
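# annotation (sketch): the constraint test_trace_nms checks -- under tracing
# the result shape must be static, so max_output is mandatory, while eagerly
# (or inside exclude_from_trace) it can be inferred from the data; boxes are
# (x0, y0, x1, y1) with x1 > x0 and y1 > y0, matching make_inputs above.
import numpy as np
from megengine import tensor
import megengine.functional as F

_boxes = tensor(np.array([[0, 0, 10, 10], [1, 1, 11, 11]], dtype=np.float32))
_scores = tensor(np.array([0.9, 0.8], dtype=np.float32))
_keep = F.vision.nms(_boxes, scores=_scores, iou_thresh=0.5)  # eager: inferred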
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx is just idx, since gopt ignores the addition of the zero base
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from an exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(3):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = tensor([9, 0, 1, 2])
@trace
def add_abc(a, b, c):
ps = a + b
result = ps + c
if step_count == bad_step:
raise CatchMe("catch me")
return result
for i in range(100):
try:
d = add_abc(a, b, c)
except CatchMe as e:
catch_count += 1
else:
np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
step_count += 1
assert catch_count == 1
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_broadcast(trace_mode):
x1 = tensor(np.random.randn(3, 1, 1))
x2 = tensor(np.random.randn(1, 4, 1))
x3 = tensor(np.random.randn(1, 1, 5))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = F.broadcast_to(x, (3, 4, 5))
return y
f(x1)
f(x2)
f(x3)
def test_trace_nms():
def make_inputs(n):
boxes = np.zeros((n, 4))
boxes[:, :2] = np.random.rand(n, 2) * 100
boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
scores = np.random.rand(n)
return tensor(boxes), tensor(scores)
@trace(symbolic=False)
def f(boxes, scores):
# with tracing, max_output must be specified
results = F.vision.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
# without tracing, max_output can be inferred inside nms
with exclude_from_trace():
_ = F.vision.nms(boxes, scores=scores, iou_thresh=0.5)
return results
f(*make_inputs(10))
f(*make_inputs(20))
f(*make_inputs(30))
def test_trace_valid_broadcast():
x1 = tensor(np.random.randn(1, 1))
x2 = tensor(np.random.randn(1, 2))
shape = (tensor([2]), tensor([2]))
@ | trace(symbolic=False) | megengine.jit.trace |
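# annotation: F.broadcast_to accepts both a plain int tuple, as in
# test_trace_broadcast's (3, 4, 5), and tensor-valued shape components, as
# in test_trace_valid_broadcast's (tensor([2]), tensor([2])); the latter is
# what lets the same symbolic=False trace replay across input shapes.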
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx is just idx, since gopt ignores the addition of the zero base
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
# clear any naming scope left over from an exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# returning x / x directly would not trigger gopt,
# since there is no way to tell that the two x's are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(3):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = tensor([9, 0, 1, 2])
@trace
def add_abc(a, b, c):
ps = a + b
result = ps + c
if step_count == bad_step:
raise CatchMe("catch me")
return result
for i in range(100):
try:
d = add_abc(a, b, c)
except CatchMe as e:
catch_count += 1
else:
np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
step_count += 1
assert catch_count == 1
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_broadcast(trace_mode):
x1 = tensor(np.random.randn(3, 1, 1))
x2 = tensor(np.random.randn(1, 4, 1))
x3 = tensor(np.random.randn(1, 1, 5))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = F.broadcast_to(x, (3, 4, 5))
return y
f(x1)
f(x2)
f(x3)
def test_trace_nms():
def make_inputs(n):
boxes = np.zeros((n, 4))
boxes[:, :2] = np.random.rand(n, 2) * 100
boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
scores = np.random.rand(n)
return tensor(boxes), tensor(scores)
@trace(symbolic=False)
def f(boxes, scores):
# with tracing, max_output must be specified
results = F.vision.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
# without tracing, max_output can be inferred inside nms
with exclude_from_trace():
_ = F.vision.nms(boxes, scores=scores, iou_thresh=0.5)
return results
f(*make_inputs(10))
f(*make_inputs(20))
f(*make_inputs(30))
def test_trace_valid_broadcast():
x1 = tensor(np.random.randn(1, 1))
x2 = tensor(np.random.randn(1, 2))
shape = (tensor([2]), tensor([2]))
@trace(symbolic=False)
def f(x, shape):
y = F.broadcast_to(x, shape)
return y
f(x1, shape)
f(x2, shape)
def test_clip():
x = tensor(np.random.randn(10, 10))
@ | trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@ | trace(symbolic=symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = | F.topk(c, 3) | megengine.functional.topk |
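# annotation (sketch): F.topk returns a (values, indices) pair; the row
# above keeps only the indices. Minimal eager example -- these tests assert
# nothing about ordering semantics, so none are assumed here.
import numpy as np
from megengine import tensor
import megengine.functional as F

_vals, _idx = F.topk(tensor(np.array([5.0, 2.0, 7.0, 1.0], dtype=np.float32)), 2)
assert _vals.numpy().shape == (2,)
assert _idx.numpy().shape == (2,)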
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx is just idx, since gopt ignores the addition of the zero base
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@ | trace(symbolic=symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
# will result into a copy of output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
# internally, biased_idx is just idx, since gopt ignores the addition of the zero base
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = | tensor([1]) | megengine.tensor |
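# annotation (sketch): megengine.tensor, the api this row targets, accepts
# Python lists as well as numpy arrays with an optional dtype, exactly as
# the prompts above use it.
import numpy as np
from megengine import tensor

_a = tensor([1, 2, 3])
_b = tensor(np.zeros((2, 2)), dtype=np.float32)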
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
        # internally, biased_idx will be identical to idx since gopt folds away the addition of 0
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
    # clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
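# nr_opr == 3 presumably counts the two input oprs (arg_0, arg_1) plus
# the ADD itself; the dumped graph is then reloaded and re-run through
# GraphInference to confirm it reproduces y.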
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
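# With capture_as_const=True, the externally defined tensor a is baked
# into the dumped graph as a constant, so inference only needs x.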
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
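# The second input of the multiply is the captured p; the assert checks
# it was serialized as an ImmutableTensor rather than a runtime input.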
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
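# With d = 0.0, y / y is 0/0 = NaN at opt_level=0; at opt_level=1 gopt
# rewrites y / y to 1.0, so g returns a finite value.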
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
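# exp(1e4) overflows float32 to inf, so the unoptimized graph cannot
# return val; at opt_level=1 the expression is presumably rewritten into
# a numerically stable log-sum-exp form.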
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f( | tensor(1.0) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
        # internally, biased_idx will be identical to idx since gopt folds away the addition of 0
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
    # clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g( | tensor(1.0) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
        # internally, biased_idx will be identical to idx since gopt folds away the addition of 0
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
    # clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
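# At opt_level=1, log(exp(x)) is folded to x, so the optimized graph is
# expected to contain exactly two fewer oprs (the exp and the log).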
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return | exp(x) | megengine.functional.exp |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
        # internally, biased_idx will be identical to idx since gopt folds away the addition of 0
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
    # clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f( | tensor(5.0) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
        # internally, biased_idx will be identical to idx since gopt folds away the addition of 0
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
    # clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
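# enable_io16xc32 keeps computation in float32 but converts the I/O
# tensors to float16, so the var feeding the output opr should have
# dtype float16 after the inference optimization pass.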
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert | isscalar(b) | megengine.core.tensor.utils.isscalar |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
        # internally, biased_idx will be identical to idx since gopt folds away the addition of 0
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
    # clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
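# x.shape[0] stays a scalar even under symbolic tracing, and comparing
# it against 0 yields a bool result that can be read back on every run.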
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
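# Because the reshape derives its leading dimension from x.shape[0], the
# same captured graph is exercised with batch sizes 2, 4 and 8.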
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = | F.topk(x, 3) | megengine.functional.topk |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
        # internally, biased_idx will be identical to idx since gopt folds away the addition of 0
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
    # clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
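# F.topk returns (values, indices); the shape assertion runs inside the
# traced function, so the static shape [3] must be known at trace time.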
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = | F.vision.warp_perspective(x, M, (2, 2)) | megengine.functional.vision.warp_perspective |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inspect
import io
import itertools
from tempfile import mkstemp
import numpy as np
import pytest
import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.optimizer as optim
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, tensor
from megengine.autodiff import GradManager
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.module import Module
from megengine.random import normal, uniform
from megengine.utils.naming import AutoNaming
@pytest.mark.parametrize("trace_mode", [False, True])
@pytest.mark.parametrize("return_mode", ["Value", "Tuple", "List", "Dict"])
def test_trace(trace_mode, return_mode):
@trace(symbolic=trace_mode)
def f(x):
if return_mode == "Tuple":
return (-x,)
elif return_mode == "List":
return [-x]
elif return_mode == "Dict":
return {"neg": -x}
else:
return -x
def get_numpy(y):
if return_mode == "Tuple" or return_mode == "List":
return y[0].numpy()
elif return_mode == "Dict":
return y["neg"].numpy()
return y.numpy()
x = tensor([1])
y = get_numpy(f(x))
for i in range(3):
np.testing.assert_equal(get_numpy(f(x)), y)
def test_output_copy_trace():
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.0], dtype=np.float32)
def forward(self, x):
x = x * self.a
            # will result in a copy of the output in grad
x = F.exp(x)
return x
ys = {False: [], True: []}
for symbolic in [False, True]:
net = Simple()
gm = GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
data = tensor(np.arange(4).reshape(2, 2), dtype="float32")
@trace(symbolic=symbolic)
def train_func(d):
with gm:
loss = net(d)
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = train_func(data).numpy()
ys[symbolic].append(y)
for i in range(3):
np.testing.assert_equal(ys[False][i], ys[True][i])
@pytest.mark.parametrize("trace_mode", [False, True])
def test_exclude_from_trace(trace_mode):
@trace(symbolic=trace_mode)
def f(x):
x = -x
with exclude_from_trace():
if i % 2:
x = -x
x = -x
return x
x = tensor([1])
for i in range(3):
y = f(x).numpy()
np.testing.assert_equal(f(x).numpy(), y)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse(trace_mode):
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f(a, b):
base = 0
c = b - a
_, idx = F.topk(c, 3)
        # internally, biased_idx will be identical to idx since gopt folds away the addition of 0
biased_idx = base + idx
return biased_idx
a = tensor(np.ones((7, 2)), dtype=np.int32)
b = tensor(2 * np.ones((7, 2)), dtype=np.float32)
for i in range(3):
y = f(a, b)
y.numpy()
@pytest.mark.parametrize("trace_mode", [False, True])
def test_elemwise_fuse_in_grad(trace_mode):
w = Parameter(np.ones([4, 6]), dtype="float32")
gm = GradManager().attach(w)
opt = optim.SGD([w], lr=0.01, momentum=0.9, weight_decay=5e-4)
# explicitly declare opt_level as 2
@trace(symbolic=trace_mode, opt_level=2)
def f():
with gm:
wm = F.sum(w ** 2, axis=1) ** 0.5
loss = wm.mean()
gm.backward(loss)
opt.step().clear_grad()
return loss
for i in range(3):
y = f()
y.numpy()
def test_print_in_trace():
for symbolic in [False]: # cannot read value in symbolic mode
@trace(symbolic=symbolic)
def f(x):
nonlocal buf
x = -x
buf = x.numpy()
x = -x
return x
buf = None
x = tensor([1])
for i in range(3):
y = f(x).numpy()
z = buf
buf = None
np.testing.assert_equal(f(x).numpy(), y)
np.testing.assert_equal(z, buf)
def test_dump():
@trace(symbolic=True, capture_as_const=True)
def f(a, b):
return a + b
    # clear any naming scope left over from a previous exception test
AutoNaming.clear()
a = tensor([2])
b = tensor([4])
y = f(a, b).numpy()
for i in range(3):
np.testing.assert_equal(f(a, b).numpy(), y)
file = io.BytesIO()
dump_info = f.dump(file)
assert dump_info.nr_opr == 3
np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
np.testing.assert_equal(dump_info.outputs, ["ADD"])
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(a, b)).values())[0]
np.testing.assert_equal(result[0], y)
def test_capture_dump():
a = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * a
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file)
file.seek(0)
infer_cg = cgtools.GraphInference(file)
result = list((infer_cg.run(x)).values())[0]
np.testing.assert_equal(result[0], y)
def test_dump_volatile():
p = tensor([2])
@trace(symbolic=True, capture_as_const=True)
def f(x):
return x * p
x = tensor([3])
y = f(x).numpy()
for i in range(3):
np.testing.assert_equal(f(x).numpy(), y)
file = io.BytesIO()
f.dump(file, optimize_for_inference=False)
file.seek(0)
cg, _, outputs = G.load_graph(file)
(out,) = outputs
assert (
cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
== "ImmutableTensor"
)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_profiler(trace_mode):
@trace(symbolic=trace_mode, profiling=True)
def f(x):
return -x
x = tensor([1])
y = f(x).numpy()
f(x)
f(x) # XXX: has to run twice
out = f.get_profile()
assert out.get("profiler")
def test_goptions():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
# directly return x / x will not trigger gopt
# since there's no way to tell the two x are the same
y = 2.0 * x
return y / y
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
y = 2.0 * x
return y / y
d = tensor(0.0)
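    # with y == 0, literal evaluation of y / y gives nan; at opt_level=1 gopt rewrites y / y to 1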
assert not np.isfinite(f(d).numpy())
np.testing.assert_equal(g(d).numpy().item(), 1.0)
def test_goptions_log_sum_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x, y):
return log(exp(x) + exp(y))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x, y):
return log(exp(x) + exp(y))
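    # exp(1e4) overflows float32, so the unoptimized graph produces a non-finite result;
    # at opt_level=1 gopt fuses log(exp(x) + exp(y)) into a numerically stable log-sum-exp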
val = 1.0e4
d = tensor(val)
o = tensor(0.0)
assert not np.isfinite(f(d, o).numpy())
np.testing.assert_almost_equal(g(d, o), val)
def test_goptions_log_exp():
@trace(symbolic=True, opt_level=0, capture_as_const=True)
def f(x):
return log(exp(x))
@trace(symbolic=True, opt_level=1, capture_as_const=True)
def g(x):
return log(exp(x))
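    # gopt folds log(exp(x)) back to x, so the optimized graph has two fewer oprs
    # (the EXP and the LOG), which the final assertion checks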
f(tensor(1.0))
_, out = mkstemp()
f.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_1 = cgtools.get_oprs_seq(outputs)
g(tensor(1.0))
g.dump(out, optimize_for_inference=False)
*_, outputs = G.load_graph(out)
oprs_2 = cgtools.get_oprs_seq(outputs)
assert len(oprs_1) - len(oprs_2) == 2
def test_optimize_for_inference():
@trace(symbolic=True, capture_as_const=True)
def f(x):
return exp(x)
_, out = mkstemp()
f(tensor(5.0))
f.dump(out, enable_io16xc32=True)
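    # enable_io16xc32 stores graph inputs/outputs as float16 while keeping float32 arithmetic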
res = G.load_graph(out)
computing_input = res.output_vars_list[0].owner.inputs[0]
assert computing_input.dtype == np.float16
def test_optimize_for_inference_broadcast():
a = tensor(np.ones(1, dtype=np.float32))
@trace(capture_as_const=True, symbolic_shape=True)
def f():
return a._broadcast(tensor([1, 10], dtype=np.int32))
f()
f.dump(io.BytesIO())
def test_trace_cvt_bool():
x = tensor([0], dtype=np.int32)
@trace(symbolic=True)
def f(x):
a = x.shape
b = a[0]
assert isscalar(b)
return b == 0
for i in range(3):
np.testing.assert_equal(f(x).numpy(), False)
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_reshape(trace_mode):
x1 = tensor(np.random.randn(2, 10, 10))
x2 = tensor(np.random.randn(4, 10, 10))
x3 = tensor(np.random.randn(8, 10, 10))
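    # the same traced function is replayed on inputs with different leading dimensions;
    # deriving the target shape from x.shape keeps the reshape valid for all of them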
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
y = x.reshape(x.shape[0], 100)
return y
f(x1)
f(x2)
f(x3)
def test_trace_topk():
x = tensor([5, 2, 7, 1, 0, 3, 2])
@trace(symbolic=True)
def f(x):
y = F.topk(x, 3)
np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
return y
for i in range(3):
f(x)
def test_trace_warp_perspective():
inp_shape = (1, 1, 4, 4)
x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
M_shape = (1, 3, 3)
M = tensor(
np.array(
[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
).reshape(M_shape)
)
@trace(symbolic=True)
def f(x, M):
out = F.vision.warp_perspective(x, M, (2, 2))
np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
return out
for i in range(3):
f(x, M)
def test_raise_on_trace():
step_count = 0
catch_count = 0
bad_step = 10
class CatchMe(Exception):
pass
a = tensor([1, 2, 3, 4])
b = tensor([5, 6, 7, 8])
c = tensor([9, 0, 1, 2])
@trace
def add_abc(a, b, c):
ps = a + b
result = ps + c
if step_count == bad_step:
raise CatchMe("catch me")
return result
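    # the exception must escape the traced function exactly once, and tracing must
    # remain usable for the remaining steps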
for i in range(100):
try:
d = add_abc(a, b, c)
except CatchMe as e:
catch_count += 1
else:
np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
step_count += 1
assert catch_count == 1
@pytest.mark.parametrize("trace_mode", [False, True])
def test_trace_broadcast(trace_mode):
x1 = tensor(np.random.randn(3, 1, 1))
x2 = tensor(np.random.randn(1, 4, 1))
x3 = tensor(np.random.randn(1, 1, 5))
@trace(symbolic=trace_mode, capture_as_const=True)
def f(x):
        y = F.broadcast_to(x, (3, 4, 5))
return y
f(x1)
f(x2)
f(x3)
def test_trace_nms():
def make_inputs(n):
boxes = np.zeros((n, 4))
boxes[:, :2] = np.random.rand(n, 2) * 100
boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100
scores = np.random.rand(n)
return tensor(boxes), tensor(scores)
@trace(symbolic=False)
def f(boxes, scores):
# with tracing, max_output must be specified
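        # (under trace the output shape must be static, so it cannot be inferred from the data)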
        results = F.vision.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
        # without tracing, max_output can be inferred inside nms
with exclude_from_trace():
_ = F.vision.nms(boxes, scores=scores, iou_thresh=0.5)
return results
f(*make_inputs(10))
f(*make_inputs(20))
f(*make_inputs(30))
def test_trace_valid_broadcast():
x1 = tensor(np.random.randn(1, 1))
x2 = tensor(np.random.randn(1, 2))
    shape = (tensor([2]), tensor([2]))
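    # the broadcast target shape is supplied as a tuple of tensors rather than Python ints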
@trace(symbolic=False)
def f(x, shape):
        y = F.broadcast_to(x, shape)
return y
f(x1, shape)
f(x2, shape)
def test_clip():
x = tensor(np.random.randn(10, 10))
@trace(symbolic=True)
def f(x, lower, upper):
        y = F.clip(x, lower, upper)