prompt (string, lengths 94–42.6k) | completion (string, lengths 6–120) | api (string, lengths 14–68) |
---|---|---|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Tuple, Union
import numpy as np
import megengine._internal as mgb
from ... import module as Float
from ...core import Parameter
from ...functional import conv_bias_activation
from ..qat import conv as QAT
from .module import QuantizedModule
class Conv2d(Float.Conv2d, QuantizedModule):
r"""quantized version of :class:`~.qat.conv.Conv2d`."""
r"""Applies a 2D convolution over an quantized input tensor, inference only.
The parameter is same with :class: `~.Conv2d`
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode: str = "CROSS_CORRELATION",
compute_mode: str = "DEFAULT",
dtype=None,
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
True,
conv_mode,
compute_mode,
)
self.output_dtype = dtype
def calc_conv_quantized(self, inp, nonlinear_mode="IDENTITY"):
inp_scale = mgb.dtype.get_scale(inp.dtype)
w_scale = mgb.dtype.get_scale(self.weight.dtype)
bias_scale = inp_scale * w_scale
return conv_bias_activation(
inp,
self.weight,
self.bias.astype( | mgb.dtype.qint32(bias_scale) | megengine._internal.dtype.qint32 |
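In the row above, the completion casts the convolution bias to a qint32 dtype whose scale is the product of the input scale and the weight scale. A minimal NumPy sketch of that scale arithmetic (the concrete scale values and bias are illustrative, not taken from the dataset):

```python
import numpy as np

# Hypothetical per-tensor scales for an int8-quantized input and weight.
inp_scale, w_scale = 0.05, 0.02
bias_scale = inp_scale * w_scale  # scale of the qint32 bias, as in calc_conv_quantized

# Quantize a float bias to int32 with that scale, then check the round trip.
bias_fp32 = np.array([0.013, -0.007], dtype=np.float32)
bias_q32 = np.round(bias_fp32 / bias_scale).astype(np.int32)
assert np.allclose(bias_q32 * bias_scale, bias_fp32, atol=bias_scale)
```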
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core import Buffer, Graph, Parameter
from megengine.module import Conv2d
from megengine.test import assertTensorClose
def test_set_value():
v0 = np.random.random((2, 3)).astype(np.float32)
param = | Parameter(v0) | megengine.core.Parameter |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core import Buffer, Graph, Parameter
from megengine.module import Conv2d
from megengine.test import assertTensorClose
def test_set_value():
v0 = np.random.random((2, 3)).astype(np.float32)
param = Parameter(v0)
v1 = np.random.random((2, 3)).astype(np.float32)
param.set_value(v1)
assertTensorClose(param.numpy(), v1, max_err=5e-6)
v2 = np.random.random((3, 3)).astype(np.float32)
# TODO: add this
# with pytest.raises(ValueError):
# param.set_value(v2)
assertTensorClose(param.numpy(), v1, max_err=5e-6)
def test_fill():
a = Buffer(np.zeros((2, 3), dtype=np.float32))
a.fill(3)
assertTensorClose(a.numpy(), np.full((2, 3), 3, dtype=np.float32))
a.fill(124.568)
assertTensorClose(a.numpy(), np.full((2, 3), 124.568, dtype=np.float32))
# TODO: remove or rewrite following test
# def test_attach():
# p_ = np.random.random((2, 3)).astype(np.float32)
# with Graph() as g:
# g.set_option('eager_evaluation', False)
# p = Parameter(p_)
# v = p * 2
# f = compile(v, None)
# out, = f()
# assertTensorClose(out, p_ * 2)
# F.add_update(p, p)
# out, = f()
# assertTensorClose(out, p_ * 4)
# TODO: remove or rewrite following test
# def test_module_attach():
# v = np.random.random((1, 3, 64, 64)).astype(np.float32)
# net = Conv2d(3, 16, 3)
# with Graph() as g:
# g.set_option('eager_evaluation', False)
# data0 = Input("data")
# f = compile(net(data0), None)
# out0, = f(data=v)
# data1 = Input("data", value=v)
# out1 = net(data1)
# assertTensorClose(out0, out1.numpy())
def test_shape_warning():
with | Graph() | megengine.core.Graph |
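The prompt above is truncated right after entering a `Graph` context. A hedged sketch of the pattern the commented-out tests in this file use with that context manager (what `test_shape_warning` actually asserts is not shown in the row, so nothing beyond the setup is assumed):

```python
import numpy as np
from megengine.core import Graph, Parameter  # legacy 0.x-era imports, as in this test file

with Graph() as g:
    # switch the graph to lazy (static) evaluation, as the commented-out tests do
    g.set_option("eager_evaluation", False)
    p = Parameter(np.random.random((2, 3)).astype(np.float32))
```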
#!/usr/bin/env python3
import argparse
import math
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine import jit, tensor
class ConvNet(M.Module):
def __init__(self):
super().__init__()
self.conv1 = M.Conv2d(in_channels=3, out_channels=1, kernel_size=3, bias=False)
def forward(self, input):
x = self.conv1(input)
return x
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="dump mge model for add_demo",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--dir",
help="set the dir where the model to dump",
default=".",
type=str,
)
args = parser.parse_args()
net = ConvNet()
net.eval()
@ | jit.trace(symbolic=True, capture_as_const=True) | megengine.jit.trace |
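The prompt above stops at the `@jit.trace` decorator. A hedged sketch of how such a dump script typically continues (the traced function name, input shape, and output file name are illustrative assumptions, and trace/dump keyword support varies across MegEngine versions):

```python
import numpy as np
import megengine.module as M
from megengine import jit, tensor

net = M.Conv2d(in_channels=3, out_channels=1, kernel_size=3, bias=False)
net.eval()

@jit.trace(symbolic=True, capture_as_const=True)
def fun(data):
    return net(data)

data = tensor(np.random.random((1, 3, 32, 32)).astype(np.float32))
fun(data)  # run once so the graph is captured
fun.dump("conv_demo.mge", arg_names=["data"])  # file name is a placeholder
```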
#!/usr/bin/env python3
import argparse
import math
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine import jit, tensor
class ConvNet(M.Module):
def __init__(self):
super().__init__()
self.conv1 = | M.Conv2d(in_channels=3, out_channels=1, kernel_size=3, bias=False) | megengine.module.Conv2d |
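For reference, a small runnable check of what the `M.Conv2d` layer built in the completion computes: with a 3x3 kernel, stride 1, and no padding, an 8x8 input shrinks to 6x6 (the input shape here is an arbitrary example):

```python
import numpy as np
import megengine.module as M
from megengine import tensor

conv = M.Conv2d(in_channels=3, out_channels=1, kernel_size=3, bias=False)
out = conv(tensor(np.random.random((1, 3, 8, 8)).astype(np.float32)))
assert out.numpy().shape == (1, 1, 6, 6)  # 8 - 3 + 1 = 6 along each spatial axis
```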
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import functools
import itertools
import os
from typing import Callable, Tuple, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal.plugin import CompGraphProfiler
from ..core import Tensor, graph, tensor
from .sublinear_memory_config import SublinearMemoryConfig
def sideeffect(f):
# during eager tracing, wrapped function is called with proxy inputs
# during static tracing, wrapped function will not be called at all
@functools.wraps(f)
def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements
if not trace._active_instance:
return f(*args, **kwargs)
tensors = {}
for i, x in itertools.chain(enumerate(args), kwargs.items()):
if isinstance(x, Tensor):
tensors[i] = x
if tensors:
_keys, tensors = zip(*tensors.items())
else:
_keys, tensors = (), ()
def callback(*tensors, f=f, keys=_keys, args=args, kwargs=kwargs):
replace = dict(zip(keys, tensors))
args = tuple(replace.get(i, x) for i, x in enumerate(args))
kwargs = {i: replace.get(i, x) for i, x in kwargs.items()}
if f(*args, **kwargs) is not None:
raise TypeError("a sideeffect function should return None")
# TODO: clear memory
trace._active_instance._register_callback(callback, tensors)
return wrapper
def mark_impure(x):
if not trace._active_instance:
return x
return trace._active_instance._mark_impure(x)
def barrier(x):
if not trace._active_instance:
return x
return trace._active_instance._insert_barrier(x)
def _dummy():
return mgb.make_immutable(*graph._use_default_if_none(None, None), 0)
class unset:
pass
class trace:
"""
Wrap a callable and provide:
* tracing via :meth:`.trace` and :meth:`.dump`
* accelerated evaluation via :meth:`.__call__`
:param func: Positional only argument.
:param symbolic: Whether to use symbolic tensor. Default: False
:param opt_level: Optimization level for compiling trace.
:param log_level: Log level.
:param sublinear_memory_config: Configuration for sublinear memory optimization.
If not None, it enables sublinear memory optimization with given setting.
:param allreduce_pack_max_size: Maximum size of an allreduce pack in MB.
If not None, multiple gradients will be packed and synchronized together
:param profiling: Whether to profile compiled trace. Default: False
"""
_active_instance = None
enabled = not os.getenv("MGE_DISABLE_TRACE")
_UNSTARTED = "unstarted"
_STARTED = "started"
_FINISHED = "finished"
def __new__(cls, *args, **kwargs):
if not args:
return functools.partial(cls, **kwargs)
return super().__new__(cls)
def __init__(
self,
func: Callable[..., Union[None, Tensor, Tuple[Tensor]]],
*,
symbolic: bool = False,
opt_level: int = None,
log_level: int = None,
sublinear_memory_config: SublinearMemoryConfig = None,
allreduce_pack_max_size: int = None,
profiling: bool = False
):
self.__wrapped__ = func
self._symbolic = symbolic
self._graph_opt_level = opt_level
self._log_level = log_level
self._sublinear_memory_config = sublinear_memory_config
self._allreduce_pack_max_size = allreduce_pack_max_size
self._status = self._UNSTARTED
self._args = None
self._kwargs = None
self._outputs = unset
self._sym_outputs = unset
self._outspec = None
self._checkpoint = None
self._compiled_func = None
self._profiling = profiling
self._profiler = None
@property
def _active(self):
c1 = self._status == self._STARTED
c2 = type(self)._active_instance is self
assert c1 == c2
return c1
def _register_callback(self, f, args=()):
assert self._active
assert isinstance(args, (tuple, list))
proxies = self._make_proxies(args)
self._forward(args, proxies, checkpoint=True)
# NOTE: under eager graph callback will fire immediately
job = mgb.opr.callback_injector(
self._insert_barrier(_dummy()), lambda _: f(*proxies)
)
self._insert_checkpoint(job)
self._outspec.append(job)
def _insert_barrier(self, x):
assert self._active
if self._checkpoint is None:
return x
if isinstance(x, Tensor):
x = x._symvar
wrap = True
else:
wrap = False
if not isinstance(x, mgb.SymbolVar):
raise TypeError
x = | mgb.opr.virtual_dep([x, self._checkpoint]) | megengine._internal.opr.virtual_dep |
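The module-level `sideeffect` decorator defined in the row above defers a state-mutating function during tracing by re-registering it as a graph callback; outside a trace it simply calls the function. A hedged usage sketch (the import path for this module and the availability of `set_value` on `Buffer` are assumptions):

```python
import numpy as np
from megengine import tensor
from megengine.core import Buffer
from megengine.jit import sideeffect  # assumed import path for the module shown above

counter = Buffer(np.zeros((1,), dtype=np.float32))

@sideeffect
def bump(step):
    # mutates module-level state and returns None, as sideeffect requires;
    # under an active trace this body runs on proxy tensors via a graph callback
    counter.set_value(counter.numpy() + step.numpy())

bump(tensor(np.ones((1,), dtype=np.float32)))  # no trace active here, so it runs eagerly
```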
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import functools
import itertools
import os
from typing import Callable, Tuple, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal.plugin import CompGraphProfiler
from ..core import Tensor, graph, tensor
from .sublinear_memory_config import SublinearMemoryConfig
def sideeffect(f):
# during eager tracing, wrapped function is called with proxy inputs
# during static tracing, wrapped function will not be called at all
@functools.wraps(f)
def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements
if not trace._active_instance:
return f(*args, **kwargs)
tensors = {}
for i, x in itertools.chain(enumerate(args), kwargs.items()):
if isinstance(x, Tensor):
tensors[i] = x
if tensors:
_keys, tensors = zip(*tensors.items())
else:
_keys, tensors = (), ()
def callback(*tensors, f=f, keys=_keys, args=args, kwargs=kwargs):
replace = dict(zip(keys, tensors))
args = tuple(replace.get(i, x) for i, x in enumerate(args))
kwargs = {i: replace.get(i, x) for i, x in kwargs.items()}
if f(*args, **kwargs) is not None:
raise TypeError("a sideeffect function should return None")
# TODO: clear memory
trace._active_instance._register_callback(callback, tensors)
return wrapper
def mark_impure(x):
if not trace._active_instance:
return x
return trace._active_instance._mark_impure(x)
def barrier(x):
if not trace._active_instance:
return x
return trace._active_instance._insert_barrier(x)
def _dummy():
return mgb.make_immutable(*graph._use_default_if_none(None, None), 0)
class unset:
pass
class trace:
"""
Wrap a callable and provide:
* tracing via :meth:`.trace` and :meth:`.dump`
* accelerated evaluation via :meth:`.__call__`
:param func: Positional only argument.
:param symbolic: Whether to use symbolic tensor. Default: False
:param opt_level: Optimization level for compiling trace.
:param log_level: Log level.
:param sublinear_memory_config: Configuration for sublinear memory optimization.
If not None, it enables sublinear memory optimization with given setting.
:param allreduce_pack_max_size: Maximum size of an allreduce pack in MB.
If not None, multiple gradients will be packed and synchronized together
:param profiling: Whether to profile compiled trace. Default: False
"""
_active_instance = None
enabled = not os.getenv("MGE_DISABLE_TRACE")
_UNSTARTED = "unstarted"
_STARTED = "started"
_FINISHED = "finished"
def __new__(cls, *args, **kwargs):
if not args:
return functools.partial(cls, **kwargs)
return super().__new__(cls)
def __init__(
self,
func: Callable[..., Union[None, Tensor, Tuple[Tensor]]],
*,
symbolic: bool = False,
opt_level: int = None,
log_level: int = None,
sublinear_memory_config: SublinearMemoryConfig = None,
allreduce_pack_max_size: int = None,
profiling: bool = False
):
self.__wrapped__ = func
self._symbolic = symbolic
self._graph_opt_level = opt_level
self._log_level = log_level
self._sublinear_memory_config = sublinear_memory_config
self._allreduce_pack_max_size = allreduce_pack_max_size
self._status = self._UNSTARTED
self._args = None
self._kwargs = None
self._outputs = unset
self._sym_outputs = unset
self._outspec = None
self._checkpoint = None
self._compiled_func = None
self._profiling = profiling
self._profiler = None
@property
def _active(self):
c1 = self._status == self._STARTED
c2 = type(self)._active_instance is self
assert c1 == c2
return c1
def _register_callback(self, f, args=()):
assert self._active
assert isinstance(args, (tuple, list))
proxies = self._make_proxies(args)
self._forward(args, proxies, checkpoint=True)
# NOTE: under eager graph callback will fire immediately
job = mgb.opr.callback_injector(
self._insert_barrier(_dummy()), lambda _: f(*proxies)
)
self._insert_checkpoint(job)
self._outspec.append(job)
def _insert_barrier(self, x):
assert self._active
if self._checkpoint is None:
return x
if isinstance(x, Tensor):
x = x._symvar
wrap = True
else:
wrap = False
if not isinstance(x, mgb.SymbolVar):
raise TypeError
x = mgb.opr.virtual_dep([x, self._checkpoint])
if wrap:
x = Tensor(x)
return x
def _insert_checkpoint(self, *args, no_barrier=False):
assert self._active
if not args:
return
args = tuple(x._symvar if isinstance(x, Tensor) else x for x in args)
for x in args:
if not isinstance(x, mgb.SymbolVar):
raise TypeError
if not no_barrier and self._checkpoint is not None:
# normally no need to _insert_barrier here, but if
# someone forgets to call _insert_barrier beforehand,
# this can make things less broken
args += (self._checkpoint,)
if len(args) == 1:
self._checkpoint = args[0]
else:
self._checkpoint = mgb.opr.virtual_dep(args)
def _mark_impure(self, x):
assert self._active
ret = x
if isinstance(x, Tensor):
x = x._symvar
if not isinstance(x, mgb.SymbolVar):
raise TypeError
self._outspec.append(x)
self._insert_checkpoint(x)
return ret
def _make_proxies(self, args):
assert isinstance(args, (tuple, list))
for x in args:
assert isinstance(x, Tensor)
return tuple(tensor(dtype=x.dtype, device=x.device) for x in args)
def _forward(self, srcs, dests, checkpoint=True):
# pseudo-op: does not run under static graph; traced
# TODO: use shared memory
assert len(srcs) == len(dests)
if not self._active:
for s, d in zip(srcs, dests):
d.set_value(s, share=False)
return
jobs = []
for s, d in zip(srcs, dests):
def callback(value, dest=d):
dest.set_value(value, share=False)
s = self._insert_barrier(s._symvar)
# NOTE: callbacks fire immediately in an eager graph
jobs.append(mgb.opr.callback_injector(s, callback))
self._outspec.extend(jobs)
if checkpoint:
self._insert_checkpoint(*jobs, no_barrier=True)
def _forward_inputs(self, *args, **kwargs):
if self._kwargs is None:
self._kwargs = kwargs
elif self._kwargs != kwargs:
raise ValueError("kwargs must not change between invocations")
if self._args is None:
self._args = []
for i in args:
if isinstance(i, Tensor):
self._args.append(tensor(dtype=i.dtype, device=i.device))
self._args[-1].set_value(i, share=False)
else:
self._args.append(tensor(i))
else:
if not len(args) == len(self._args):
raise TypeError
for i, proxy in zip(args, self._args):
proxy.set_value(i, share=False)
# XXX: sync?
def _make_outputs(self, outputs):
if outputs is None:
self._outputs = None
return
if isinstance(outputs, Tensor):
# no one is able to call barrier after this, so no need to checkpoint
# but a checkpoint does little harm anyway
(self._outputs,) = self._make_proxies([outputs])
return
if not isinstance(outputs, (tuple, list)):
raise TypeError("should return (tuple of) tensor")
for i in outputs:
if not isinstance(i, Tensor):
raise TypeError("should return (tuple of) tensor")
self._outputs = self._make_proxies(outputs)
def _foward_outputs(self, outputs):
# pseudo-op: does not run under static graph; traced
if self._outputs is unset:
self._make_outputs(outputs)
if self._outputs is None:
if outputs is not None:
raise TypeError("should return None")
elif isinstance(self._outputs, Tensor):
if not isinstance(outputs, Tensor):
raise TypeError("should return a tensor")
self._forward([outputs], [self._outputs])
else:
assert isinstance(self._outputs, tuple)
def check():
if not isinstance(outputs, (tuple, list)):
return False
if len(self._outputs) != len(outputs):
return False
for x in outputs:
if not isinstance(x, Tensor):
return False
return True
if not check():
raise TypeError(
"should return tuple of %d tensors" % len(self._outputs)
)
self._forward(outputs, self._outputs)
def _apply_graph_options(self, cg):
# graph opt level
if self._graph_opt_level is not None:
cg.set_option("graph_opt_level", self._graph_opt_level)
# log level
if self._log_level is not None:
cg.set_option("log_level", self._log_level)
# sublinear
if self._sublinear_memory_config is not None:
cg.set_option("enable_sublinear_memory_opt", True)
cg.set_option(
"sublinear_mem_cofig.lb_memory",
self._sublinear_memory_config.lb_memory,
)
cg.set_option(
"sublinear_mem_cofig.genetic_nr_iter",
self._sublinear_memory_config.genetic_nr_iter,
)
cg.set_option(
"sublinear_mem_cofig.genetic_pool_size",
self._sublinear_memory_config.genetic_pool_size,
)
cg.set_option(
"sublinear_mem_cofig.thresh_nr_try",
self._sublinear_memory_config.thresh_nr_try,
)
cg.set_option(
"sublinear_mem_cofig.num_worker",
self._sublinear_memory_config.num_worker,
)
# pack allreduce
if self._allreduce_pack_max_size is not None:
cg.set_option("allreduce_pack_max_size", self._allreduce_pack_max_size)
# profile
if self._profiling:
self._profiler = CompGraphProfiler(cg)
def _get_graph(self, eager):
if eager:
if not hasattr(self, "_eager_graph"):
# pylint: disable=attribute-defined-outside-init
self._eager_graph = graph.Graph(eager_evaluation=True)
self._apply_graph_options(self._eager_graph)
return self._eager_graph
else:
if not hasattr(self, "_static_graph"):
# pylint: disable=attribute-defined-outside-init
self._static_graph = graph.Graph(eager_evaluation=False)
self._apply_graph_options(self._static_graph)
return self._static_graph
@contextlib.contextmanager
def _prepare(self, args, kwargs, enable):
# prepare for execution
self._forward_inputs(*args, **kwargs)
if not enable:
# XXX: use our own graph here?
cg = None
elif self._status == self._FINISHED:
cg = None
elif self._symbolic:
cg = self._get_graph(eager=False)
else:
cg = self._get_graph(eager=True)
try:
# NOTE: always trace in a new graph, so capturing an undetached tensor
# will never work (would work if tracing in default graph)
if cg is None:
yield
else:
with cg:
yield
finally:
# XXX: properly release memory
if cg:
cg.clear_device_memory()
@contextlib.contextmanager
def _activate(self):
# prepare for tracing
if self._status != self._UNSTARTED:
raise RuntimeError("cannot trace a second time")
if type(self)._active_instance is not None:
raise RuntimeError("nested trace is unsupported")
self._status = self._STARTED
type(self)._active_instance = self
self._user_cache = {}
try:
yield
finally:
self._status = self._FINISHED
self._user_cache = None
type(self)._active_instance = None
def _run_wrapped(self):
outputs = self.__wrapped__(*self._args, **self._kwargs)
self._foward_outputs(outputs)
return outputs
def _do_trace(self):
with self._activate():
self._outspec = []
outputs = self._run_wrapped()
if outputs is None:
self._sym_outputs = None
else:
if isinstance(outputs, Tensor):
outputs = [outputs]
# _run_wrapped has checked validity of outputs
self._sym_outputs = tuple(i._symvar for i in outputs)
mgb.comp_graph_tools.set_priority_to_id(self._outspec)
self._compiled_func = graph.get_default_graph().compile(None, self._outspec)
def trace(self, *args: Tensor, **kwargs):
"""
Trace wrapped callable with provided arguments.
"""
with self._prepare(args, kwargs, enable=True):
self._do_trace()
return self
def __call__(self, *args: Tensor, **kwargs):
"""
Evaluate on provided arguments, using compiled trace
instead of the original callable if applicable.
:return: ``None`` or :class:`~.Tensor` or tuple of :class:`~.Tensor`, depending on the
return value of wrapped callable.
"""
with self._prepare(args, kwargs, enable=self.enabled):
if not self.enabled:
self._run_wrapped()
elif self._status == self._FINISHED:
self._compiled_func()
else:
if self._status == self._UNSTARTED:
self._do_trace()
if self._symbolic:
self._compiled_func()
return self._outputs
def dump(
self,
fpath,
*,
arg_names=None,
append=False,
optimize_for_inference=False,
**kwargs
):
"""
Serialize trace to file system.
:param fpath: positional only argument. Path of output file.
:param arg_names: names of the input tensors in the traced function.
:param append: whether output is appended to ``fpath``.
:param optimize_for_inference: whether to enable optimize_for_inference
pass before dump.
:param enable_io16xc32: whether to use float16 for I/O between oprs and use
float32 as internal computation precision. Note the output var would be
changed to float16.
:param enable_ioc16: whether to use float16 for both I/O and computation
precision.
:param enable_hwcd4: whether to use NHWCD4 data layout. This is faster on some
OpenCL backend.
:param enable_nchw88: whether to use NCHW88 data layout; currently
used in the x86 AVX backend.
:param enable_nchw44: whether to use NCHW44 data layout; currently
used in the ARM backend.
:param enable_nchw44_dot: whether to use NCHW44_dot data layout; currently
used in the ARMv8.2+dotprod backend.
:param enable_nchw4: whether to use NCHW4 data layout; currently
used in the NVIDIA backend (based on cuDNN).
:param enable_nchw32: whether to use NCHW32 data layout; currently
used in the NVIDIA backend with TensorCore (based on cuDNN).
:param enable_chwn4: whether to use CHWN4 data layout; currently
used in the NVIDIA backend with TensorCore.
:param enable_fuse_conv_bias_nonlinearity: whether to fuse conv+bias+nonlinearity
into one opr.
:param enable_fuse_conv_bias_with_z: whether to fuse conv_bias with its z
input for inference on the NVIDIA backend (this optimization pass will
cause a precision mismatch between training and inference outputs).
"""
if self._status != self._FINISHED:
raise ValueError("not traced")
assert isinstance(self._sym_outputs, (tuple, type(None)))
if not self._sym_outputs:
raise ValueError("not outputs")
if arg_names is None:
arg_names = ["arg_%d" % i for i in range(len(self._args))]
elif len(arg_names) != len(self._args):
raise ValueError(
"len(arg_names) should be {}, got {}".format(
len(self._args), len(arg_names)
)
)
optimize_for_inference_args_map = {
"enable_io16xc32": "f16_io_f32_comp",
"enable_ioc16": "f16_io_comp",
"enable_hwcd4": "use_nhwcd4",
"enable_nchw4": "use_nchw4",
"enable_nchw88": "use_nchw88",
"enable_nchw32": "use_nchw32",
"enable_nchw44": "use_nchw44",
"enable_nchw44_dot": "use_nchw44_dot",
"enable_chwn4": "use_chwn4",
"enable_fuse_conv_bias_nonlinearity": "fuse_conv_bias_nonlinearity",
"enable_fuse_conv_bias_with_z": "fuse_conv_bias_with_z",
}
if optimize_for_inference:
optimize_for_inference_kwargs = {}
for k, v in optimize_for_inference_args_map.items():
if kwargs.pop(k, False):
optimize_for_inference_kwargs[v] = True
else:
for k in optimize_for_inference_args_map:
if kwargs.get(k, False):
raise ValueError(
"cannot set %s when optimize_for_inference is not set" % k
)
if kwargs:
raise ValueError("unknown options: %s" % list(kwargs))
cg = self._sym_outputs[0].owner_graph
replace = {}
for t, name in zip(self._args, arg_names):
# relies on symvar dedup
s = t.__mgb_symvar__(comp_graph=cg)
replace[s] = mgb.make_arg(
t.device, cg, dtype=t.dtype, shape=t.shape, name=name
)
# Convert VolatileSharedDeviceTensor to SharedDeviceTensor,
# otherwise some optimizations would not work. The conversion is
# safe because there simply is no way (using builtin ops) to make
# a VolatileSharedDeviceTensor actually volatile.
for s in mgb.cgtools.get_dep_vars(
self._sym_outputs, "VolatileSharedDeviceTensor"
):
if s in replace:
continue # is an input
replace[s] = mgb.SharedND._from_symvar(s).symvar(
cg, name=s.name, volatile=False
)
sym_outputs = | mgb.cgtools.replace_vars(self._sym_outputs, replace) | megengine._internal.cgtools.replace_vars |
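Because `__new__` returns a `functools.partial` when no positional argument is given, `trace` works both as a bare decorator and as a decorator factory. A minimal sketch of both forms (assuming this module is exposed as `megengine.jit`, as in the dump script earlier):

```python
import numpy as np
from megengine import jit, tensor

@jit.trace                 # bare form: the function is the positional argument
def double(x):
    return x * 2

@jit.trace(symbolic=True)  # factory form: __new__ returns functools.partial(trace, symbolic=True)
def quadruple(x):
    return x * 4

x = tensor(np.arange(3, dtype=np.float32))
double(x)
quadruple(x)
```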
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import functools
import itertools
import os
from typing import Callable, Tuple, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal.plugin import CompGraphProfiler
from ..core import Tensor, graph, tensor
from .sublinear_memory_config import SublinearMemoryConfig
def sideeffect(f):
# during eager tracing, wrapped function is called with proxy inputs
# during static tracing, wrapped function will not be called at all
@functools.wraps(f)
def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements
if not trace._active_instance:
return f(*args, **kwargs)
tensors = {}
for i, x in itertools.chain(enumerate(args), kwargs.items()):
if isinstance(x, Tensor):
tensors[i] = x
if tensors:
_keys, tensors = zip(*tensors.items())
else:
_keys, tensors = (), ()
def callback(*tensors, f=f, keys=_keys, args=args, kwargs=kwargs):
replace = dict(zip(keys, tensors))
args = tuple(replace.get(i, x) for i, x in enumerate(args))
kwargs = {i: replace.get(i, x) for i, x in kwargs.items()}
if f(*args, **kwargs) is not None:
raise TypeError("a sideeffect function should return None")
# TODO: clear memory
trace._active_instance._register_callback(callback, tensors)
return wrapper
def mark_impure(x):
if not trace._active_instance:
return x
return trace._active_instance._mark_impure(x)
def barrier(x):
if not trace._active_instance:
return x
return trace._active_instance._insert_barrier(x)
def _dummy():
return mgb.make_immutable(*graph._use_default_if_none(None, None), 0)
class unset:
pass
class trace:
"""
Wrap a callable and provide:
* tracing via :meth:`.trace` and :meth:`.dump`
* accelerated evaluation via :meth:`.__call__`
:param func: Positional only argument.
:param symbolic: Whether to use symbolic tensor. Default: False
:param opt_level: Optimization level for compiling trace.
:param log_level: Log level.
:param sublinear_memory_config: Configuration for sublinear memory optimization.
If not None, it enables sublinear memory optimization with given setting.
:param allreduce_pack_max_size: Maximum size of an allreduce pack in MB.
If not None, multiple gradients will be packed and synchronized together
:param profiling: Whether to profile compiled trace. Default: False
"""
_active_instance = None
enabled = not os.getenv("MGE_DISABLE_TRACE")
_UNSTARTED = "unstarted"
_STARTED = "started"
_FINISHED = "finished"
def __new__(cls, *args, **kwargs):
if not args:
return functools.partial(cls, **kwargs)
return super().__new__(cls)
def __init__(
self,
func: Callable[..., Union[None, Tensor, Tuple[Tensor]]],
*,
symbolic: bool = False,
opt_level: int = None,
log_level: int = None,
sublinear_memory_config: SublinearMemoryConfig = None,
allreduce_pack_max_size: int = None,
profiling: bool = False
):
self.__wrapped__ = func
self._symbolic = symbolic
self._graph_opt_level = opt_level
self._log_level = log_level
self._sublinear_memory_config = sublinear_memory_config
self._allreduce_pack_max_size = allreduce_pack_max_size
self._status = self._UNSTARTED
self._args = None
self._kwargs = None
self._outputs = unset
self._sym_outputs = unset
self._outspec = None
self._checkpoint = None
self._compiled_func = None
self._profiling = profiling
self._profiler = None
@property
def _active(self):
c1 = self._status == self._STARTED
c2 = type(self)._active_instance is self
assert c1 == c2
return c1
def _register_callback(self, f, args=()):
assert self._active
assert isinstance(args, (tuple, list))
proxies = self._make_proxies(args)
self._forward(args, proxies, checkpoint=True)
# NOTE: under eager graph callback will fire immediately
job = mgb.opr.callback_injector(
self._insert_barrier(_dummy()), lambda _: f(*proxies)
)
self._insert_checkpoint(job)
self._outspec.append(job)
def _insert_barrier(self, x):
assert self._active
if self._checkpoint is None:
return x
if isinstance(x, Tensor):
x = x._symvar
wrap = True
else:
wrap = False
if not isinstance(x, mgb.SymbolVar):
raise TypeError
x = mgb.opr.virtual_dep([x, self._checkpoint])
if wrap:
x = Tensor(x)
return x
def _insert_checkpoint(self, *args, no_barrier=False):
assert self._active
if not args:
return
args = tuple(x._symvar if isinstance(x, Tensor) else x for x in args)
for x in args:
if not isinstance(x, mgb.SymbolVar):
raise TypeError
if not no_barrier and self._checkpoint is not None:
# normally no need to _insert_barrier here, but if
# someone forgets to call _insert_barrier beforehand,
# this can make things less broken
args += (self._checkpoint,)
if len(args) == 1:
self._checkpoint = args[0]
else:
self._checkpoint = mgb.opr.virtual_dep(args)
def _mark_impure(self, x):
assert self._active
ret = x
if isinstance(x, Tensor):
x = x._symvar
if not isinstance(x, mgb.SymbolVar):
raise TypeError
self._outspec.append(x)
self._insert_checkpoint(x)
return ret
def _make_proxies(self, args):
assert isinstance(args, (tuple, list))
for x in args:
assert isinstance(x, Tensor)
return tuple(tensor(dtype=x.dtype, device=x.device) for x in args)
def _forward(self, srcs, dests, checkpoint=True):
# pseudo-op: does not run under static graph; traced
# TODO: use shared memory
assert len(srcs) == len(dests)
if not self._active:
for s, d in zip(srcs, dests):
d.set_value(s, share=False)
return
jobs = []
for s, d in zip(srcs, dests):
def callback(value, dest=d):
dest.set_value(value, share=False)
s = self._insert_barrier(s._symvar)
# NOTE: callbacks fire immediately in an eager graph
jobs.append(mgb.opr.callback_injector(s, callback))
self._outspec.extend(jobs)
if checkpoint:
self._insert_checkpoint(*jobs, no_barrier=True)
def _forward_inputs(self, *args, **kwargs):
if self._kwargs is None:
self._kwargs = kwargs
elif self._kwargs != kwargs:
raise ValueError("kwargs must not change between invocations")
if self._args is None:
self._args = []
for i in args:
if isinstance(i, Tensor):
self._args.append(tensor(dtype=i.dtype, device=i.device))
self._args[-1].set_value(i, share=False)
else:
self._args.append(tensor(i))
else:
if not len(args) == len(self._args):
raise TypeError
for i, proxy in zip(args, self._args):
proxy.set_value(i, share=False)
# XXX: sync?
def _make_outputs(self, outputs):
if outputs is None:
self._outputs = None
return
if isinstance(outputs, Tensor):
# no one is able to call barrier after this, so no need to checkpoint
# but a checkpoint does little harm anyway
(self._outputs,) = self._make_proxies([outputs])
return
if not isinstance(outputs, (tuple, list)):
raise TypeError("should return (tuple of) tensor")
for i in outputs:
if not isinstance(i, Tensor):
raise TypeError("should return (tuple of) tensor")
self._outputs = self._make_proxies(outputs)
def _foward_outputs(self, outputs):
# pseudo-op: does not run under static graph; traced
if self._outputs is unset:
self._make_outputs(outputs)
if self._outputs is None:
if outputs is not None:
raise TypeError("should return None")
elif isinstance(self._outputs, Tensor):
if not isinstance(outputs, Tensor):
raise TypeError("should return a tensor")
self._forward([outputs], [self._outputs])
else:
assert isinstance(self._outputs, tuple)
def check():
if not isinstance(outputs, (tuple, list)):
return False
if len(self._outputs) != len(outputs):
return False
for x in outputs:
if not isinstance(x, Tensor):
return False
return True
if not check():
raise TypeError(
"should return tuple of %d tensors" % len(self._outputs)
)
self._forward(outputs, self._outputs)
def _apply_graph_options(self, cg):
# graph opt level
if self._graph_opt_level is not None:
cg.set_option("graph_opt_level", self._graph_opt_level)
# log level
if self._log_level is not None:
cg.set_option("log_level", self._log_level)
# sublinear
if self._sublinear_memory_config is not None:
cg.set_option("enable_sublinear_memory_opt", True)
cg.set_option(
"sublinear_mem_cofig.lb_memory",
self._sublinear_memory_config.lb_memory,
)
cg.set_option(
"sublinear_mem_cofig.genetic_nr_iter",
self._sublinear_memory_config.genetic_nr_iter,
)
cg.set_option(
"sublinear_mem_cofig.genetic_pool_size",
self._sublinear_memory_config.genetic_pool_size,
)
cg.set_option(
"sublinear_mem_cofig.thresh_nr_try",
self._sublinear_memory_config.thresh_nr_try,
)
cg.set_option(
"sublinear_mem_cofig.num_worker",
self._sublinear_memory_config.num_worker,
)
# pack allreduce
if self._allreduce_pack_max_size is not None:
cg.set_option("allreduce_pack_max_size", self._allreduce_pack_max_size)
# profile
if self._profiling:
self._profiler = CompGraphProfiler(cg)
def _get_graph(self, eager):
if eager:
if not hasattr(self, "_eager_graph"):
# pylint: disable=attribute-defined-outside-init
self._eager_graph = graph.Graph(eager_evaluation=True)
self._apply_graph_options(self._eager_graph)
return self._eager_graph
else:
if not hasattr(self, "_static_graph"):
# pylint: disable=attribute-defined-outside-init
self._static_graph = graph.Graph(eager_evaluation=False)
self._apply_graph_options(self._static_graph)
return self._static_graph
@contextlib.contextmanager
def _prepare(self, args, kwargs, enable):
# prepare for execution
self._forward_inputs(*args, **kwargs)
if not enable:
# XXX: use our own graph here?
cg = None
elif self._status == self._FINISHED:
cg = None
elif self._symbolic:
cg = self._get_graph(eager=False)
else:
cg = self._get_graph(eager=True)
try:
# NOTE: always trace in a new graph, so capturing an undetached tensor
# will never work (would work if tracing in default graph)
if cg is None:
yield
else:
with cg:
yield
finally:
# XXX: properly release memory
if cg:
cg.clear_device_memory()
@contextlib.contextmanager
def _activate(self):
# prepare for tracing
if self._status != self._UNSTARTED:
raise RuntimeError("cannot trace a second time")
if type(self)._active_instance is not None:
raise RuntimeError("nested trace is unsupported")
self._status = self._STARTED
type(self)._active_instance = self
self._user_cache = {}
try:
yield
finally:
self._status = self._FINISHED
self._user_cache = None
type(self)._active_instance = None
def _run_wrapped(self):
outputs = self.__wrapped__(*self._args, **self._kwargs)
self._foward_outputs(outputs)
return outputs
def _do_trace(self):
with self._activate():
self._outspec = []
outputs = self._run_wrapped()
if outputs is None:
self._sym_outputs = None
else:
if isinstance(outputs, Tensor):
outputs = [outputs]
# _run_wrapped has checked validity of outputs
self._sym_outputs = tuple(i._symvar for i in outputs)
mgb.comp_graph_tools.set_priority_to_id(self._outspec)
self._compiled_func = graph.get_default_graph().compile(None, self._outspec)
def trace(self, *args: Tensor, **kwargs):
"""
Trace wrapped callable with provided arguments.
"""
with self._prepare(args, kwargs, enable=True):
self._do_trace()
return self
def __call__(self, *args: Tensor, **kwargs):
"""
Evaluate on provided arguments, using compiled trace
instead of the original callable if applicable.
:return: ``None`` or :class:`~.Tensor` or tuple of :class:`~.Tensor`, depending on the
return value of wrapped callable.
"""
with self._prepare(args, kwargs, enable=self.enabled):
if not self.enabled:
self._run_wrapped()
elif self._status == self._FINISHED:
self._compiled_func()
else:
if self._status == self._UNSTARTED:
self._do_trace()
if self._symbolic:
self._compiled_func()
return self._outputs
def dump(
self,
fpath,
*,
arg_names=None,
append=False,
optimize_for_inference=False,
**kwargs
):
"""
Serialize trace to file system.
:param fpath: positional only argument. Path of output file.
:param arg_names: names of the input tensors in the traced function.
:param append: whether output is appended to ``fpath``.
:param optimize_for_inference: whether to enable optimize_for_inference
pass before dump.
:param enable_io16xc32: whether to use float16 for I/O between oprs and use
float32 as internal computation precision. Note the output var would be
changed to float16.
:param enable_ioc16: whether to use float16 for both I/O and computation
precision.
:param enable_hwcd4: whether to use NHWCD4 data layout. This is faster on some
OpenCL backend.
:param enable_nchw88: whether to use NCHW88 data layout; currently
used in the x86 AVX backend.
:param enable_nchw44: whether to use NCHW44 data layout; currently
used in the ARM backend.
:param enable_nchw44_dot: whether to use NCHW44_dot data layout; currently
used in the ARMv8.2+dotprod backend.
:param enable_nchw4: whether to use NCHW4 data layout; currently
used in the NVIDIA backend (based on cuDNN).
:param enable_nchw32: whether to use NCHW32 data layout; currently
used in the NVIDIA backend with TensorCore (based on cuDNN).
:param enable_chwn4: whether to use CHWN4 data layout; currently
used in the NVIDIA backend with TensorCore.
:param enable_fuse_conv_bias_nonlinearity: whether to fuse conv+bias+nonlinearity
into one opr.
:param enable_fuse_conv_bias_with_z: whether to fuse conv_bias with its z
input for inference on the NVIDIA backend (this optimization pass will
cause a precision mismatch between training and inference outputs).
"""
if self._status != self._FINISHED:
raise ValueError("not traced")
assert isinstance(self._sym_outputs, (tuple, type(None)))
if not self._sym_outputs:
raise ValueError("not outputs")
if arg_names is None:
arg_names = ["arg_%d" % i for i in range(len(self._args))]
elif len(arg_names) != len(self._args):
raise ValueError(
"len(arg_names) should be {}, got {}".format(
len(self._args), len(arg_names)
)
)
optimize_for_inference_args_map = {
"enable_io16xc32": "f16_io_f32_comp",
"enable_ioc16": "f16_io_comp",
"enable_hwcd4": "use_nhwcd4",
"enable_nchw4": "use_nchw4",
"enable_nchw88": "use_nchw88",
"enable_nchw32": "use_nchw32",
"enable_nchw44": "use_nchw44",
"enable_nchw44_dot": "use_nchw44_dot",
"enable_chwn4": "use_chwn4",
"enable_fuse_conv_bias_nonlinearity": "fuse_conv_bias_nonlinearity",
"enable_fuse_conv_bias_with_z": "fuse_conv_bias_with_z",
}
if optimize_for_inference:
optimize_for_inference_kwargs = {}
for k, v in optimize_for_inference_args_map.items():
if kwargs.pop(k, False):
optimize_for_inference_kwargs[v] = True
else:
for k in optimize_for_inference_args_map:
if kwargs.get(k, False):
raise ValueError(
"cannot set %s when optimize_for_inference is not set" % k
)
if kwargs:
raise ValueError("unknown options: %s" % list(kwargs))
cg = self._sym_outputs[0].owner_graph
replace = {}
for t, name in zip(self._args, arg_names):
# relies on symvar dedup
s = t.__mgb_symvar__(comp_graph=cg)
replace[s] = mgb.make_arg(
t.device, cg, dtype=t.dtype, shape=t.shape, name=name
)
# Convert VolatileSharedDeviceTensor to SharedDeviceTensor,
# otherwise some optimizations would not work. The conversion is
# safe because there simply is no way (using builtin ops) to make
# a VolatileSharedDeviceTensor actually volatile.
for s in mgb.cgtools.get_dep_vars(
self._sym_outputs, "VolatileSharedDeviceTensor"
):
if s in replace:
continue # is an input
replace[s] = mgb.SharedND._from_symvar(s).symvar(
cg, name=s.name, volatile=False
)
sym_outputs = mgb.cgtools.replace_vars(self._sym_outputs, replace)
sym_outputs = list(sym_outputs)
if optimize_for_inference:
sym_outputs = mgb.optimize_for_inference(
sym_outputs, **optimize_for_inference_kwargs
)
| mgb.serialize_comp_graph_to_file(fpath, sym_outputs, append=append) | megengine._internal.serialize_comp_graph_to_file |
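`dump` first rewrites the traced graph's inputs, translates any `enable_*` keyword into the corresponding `optimize_for_inference` option via the mapping above, and finally serializes with `mgb.serialize_comp_graph_to_file`. A hedged usage sketch (file name and input are placeholders):

```python
import numpy as np
from megengine import jit, tensor

@jit.trace(symbolic=True)
def halve(x):
    return x * 0.5

halve(tensor(np.ones((8,), dtype=np.float32)))  # the trace must be recorded before dump()
halve.dump(
    "halve.mge",                 # placeholder output path
    arg_names=["x"],
    optimize_for_inference=True,
    enable_io16xc32=True,        # mapped to the f16_io_f32_comp option above
)
```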
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import functools
import itertools
import os
from typing import Callable, Tuple, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal.plugin import CompGraphProfiler
from ..core import Tensor, graph, tensor
from .sublinear_memory_config import SublinearMemoryConfig
def sideeffect(f):
# during eager tracing, wrapped function is called with proxy inputs
# during static tracing, wrapped function will not be called at all
@functools.wraps(f)
def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements
if not trace._active_instance:
return f(*args, **kwargs)
tensors = {}
for i, x in itertools.chain(enumerate(args), kwargs.items()):
if isinstance(x, Tensor):
tensors[i] = x
if tensors:
_keys, tensors = zip(*tensors.items())
else:
_keys, tensors = (), ()
def callback(*tensors, f=f, keys=_keys, args=args, kwargs=kwargs):
replace = dict(zip(keys, tensors))
args = tuple(replace.get(i, x) for i, x in enumerate(args))
kwargs = {i: replace.get(i, x) for i, x in kwargs.items()}
if f(*args, **kwargs) is not None:
raise TypeError("a sideeffect function should return None")
# TODO: clear memory
trace._active_instance._register_callback(callback, tensors)
return wrapper
def mark_impure(x):
if not trace._active_instance:
return x
return trace._active_instance._mark_impure(x)
def barrier(x):
if not trace._active_instance:
return x
return trace._active_instance._insert_barrier(x)
def _dummy():
return mgb.make_immutable(*graph._use_default_if_none(None, None), 0)
class unset:
pass
class trace:
"""
Wrap a callable and provide:
* tracing via :meth:`.trace` and :meth:`.dump`
* accelerated evaluation via :meth:`.__call__`
:param func: Positional only argument.
:param symbolic: Whether to use symbolic tensor. Default: False
:param opt_level: Optimization level for compiling trace.
:param log_level: Log level.
:param sublinear_memory_config: Configuration for sublinear memory optimization.
If not None, it enables sublinear memory optimization with given setting.
:param allreduce_pack_max_size: Maximum size of an allreduce pack in MB.
If not None, multiple gradients will be packed and synchronized together
:param profiling: Whether to profile compiled trace. Default: False
"""
_active_instance = None
enabled = not os.getenv("MGE_DISABLE_TRACE")
_UNSTARTED = "unstarted"
_STARTED = "started"
_FINISHED = "finished"
def __new__(cls, *args, **kwargs):
if not args:
return functools.partial(cls, **kwargs)
return super().__new__(cls)
def __init__(
self,
func: Callable[..., Union[None, Tensor, Tuple[Tensor]]],
*,
symbolic: bool = False,
opt_level: int = None,
log_level: int = None,
sublinear_memory_config: SublinearMemoryConfig = None,
allreduce_pack_max_size: int = None,
profiling: bool = False
):
self.__wrapped__ = func
self._symbolic = symbolic
self._graph_opt_level = opt_level
self._log_level = log_level
self._sublinear_memory_config = sublinear_memory_config
self._allreduce_pack_max_size = allreduce_pack_max_size
self._status = self._UNSTARTED
self._args = None
self._kwargs = None
self._outputs = unset
self._sym_outputs = unset
self._outspec = None
self._checkpoint = None
self._compiled_func = None
self._profiling = profiling
self._profiler = None
@property
def _active(self):
c1 = self._status == self._STARTED
c2 = type(self)._active_instance is self
assert c1 == c2
return c1
def _register_callback(self, f, args=()):
assert self._active
assert isinstance(args, (tuple, list))
proxies = self._make_proxies(args)
self._forward(args, proxies, checkpoint=True)
# NOTE: under eager graph callback will fire immediately
job = mgb.opr.callback_injector(
self._insert_barrier(_dummy()), lambda _: f(*proxies)
)
self._insert_checkpoint(job)
self._outspec.append(job)
def _insert_barrier(self, x):
assert self._active
if self._checkpoint is None:
return x
if isinstance(x, Tensor):
x = x._symvar
wrap = True
else:
wrap = False
if not isinstance(x, mgb.SymbolVar):
raise TypeError
x = mgb.opr.virtual_dep([x, self._checkpoint])
if wrap:
x = Tensor(x)
return x
def _insert_checkpoint(self, *args, no_barrier=False):
assert self._active
if not args:
return
args = tuple(x._symvar if isinstance(x, Tensor) else x for x in args)
for x in args:
if not isinstance(x, mgb.SymbolVar):
raise TypeError
if not no_barrier and self._checkpoint is not None:
# normally no need to _insert_barrier here, but if
# someone forgets to call _insert_barrier beforehand,
# this can make things less broken
args += (self._checkpoint,)
if len(args) == 1:
self._checkpoint = args[0]
else:
self._checkpoint = | mgb.opr.virtual_dep(args) | megengine._internal.opr.virtual_dep |
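Tracing can also be switched off globally: the class attribute `enabled` is derived from the `MGE_DISABLE_TRACE` environment variable at import time, and `__call__` then falls back to running the wrapped function directly. A minimal sketch (the variable must be set before `megengine.jit` is first imported):

```python
import os

# Disable trace compilation for the whole process; `trace.enabled` is evaluated
# when the module is imported, so set the variable first.
os.environ["MGE_DISABLE_TRACE"] = "1"

import megengine.jit as jit  # noqa: E402
assert not jit.trace.enabled
```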
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import functools
import itertools
import os
from typing import Callable, Tuple, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal.plugin import CompGraphProfiler
from ..core import Tensor, graph, tensor
from .sublinear_memory_config import SublinearMemoryConfig
def sideeffect(f):
# during eager tracing, wrapped function is called with proxy inputs
# during static tracing, wrapped function will not be called at all
@functools.wraps(f)
def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements
if not trace._active_instance:
return f(*args, **kwargs)
tensors = {}
for i, x in itertools.chain(enumerate(args), kwargs.items()):
if isinstance(x, Tensor):
tensors[i] = x
if tensors:
_keys, tensors = zip(*tensors.items())
else:
_keys, tensors = (), ()
def callback(*tensors, f=f, keys=_keys, args=args, kwargs=kwargs):
replace = dict(zip(keys, tensors))
args = tuple(replace.get(i, x) for i, x in enumerate(args))
kwargs = {i: replace.get(i, x) for i, x in kwargs.items()}
if f(*args, **kwargs) is not None:
raise TypeError("a sideeffect function should return None")
# TODO: clear memory
trace._active_instance._register_callback(callback, tensors)
return wrapper
def mark_impure(x):
if not trace._active_instance:
return x
return trace._active_instance._mark_impure(x)
def barrier(x):
if not trace._active_instance:
return x
return trace._active_instance._insert_barrier(x)
def _dummy():
return mgb.make_immutable(*graph._use_default_if_none(None, None), 0)
class unset:
pass
class trace:
"""
Wrap a callable and provide:
* tracing via :meth:`.trace` and :meth:`.dump`
* accelerated evaluation via :meth:`.__call__`
:param func: Positional only argument.
:param symbolic: Whether to use symbolic tensor. Default: False
:param opt_level: Optimization level for compiling trace.
:param log_level: Log level.
:param sublinear_memory_config: Configuration for sublinear memory optimization.
If not None, it enables sublinear memory optimization with given setting.
:param allreduce_pack_max_size: Maximum size of an allreduce pack in MB.
If not None, multiple gradients will be packed and synchronized together
:param profiling: Whether to profile compiled trace. Default: False
"""
_active_instance = None
enabled = not os.getenv("MGE_DISABLE_TRACE")
_UNSTARTED = "unstarted"
_STARTED = "started"
_FINISHED = "finished"
def __new__(cls, *args, **kwargs):
if not args:
return functools.partial(cls, **kwargs)
return super().__new__(cls)
def __init__(
self,
func: Callable[..., Union[None, Tensor, Tuple[Tensor]]],
*,
symbolic: bool = False,
opt_level: int = None,
log_level: int = None,
sublinear_memory_config: SublinearMemoryConfig = None,
allreduce_pack_max_size: int = None,
profiling: bool = False
):
self.__wrapped__ = func
self._symbolic = symbolic
self._graph_opt_level = opt_level
self._log_level = log_level
self._sublinear_memory_config = sublinear_memory_config
self._allreduce_pack_max_size = allreduce_pack_max_size
self._status = self._UNSTARTED
self._args = None
self._kwargs = None
self._outputs = unset
self._sym_outputs = unset
self._outspec = None
self._checkpoint = None
self._compiled_func = None
self._profiling = profiling
self._profiler = None
@property
def _active(self):
c1 = self._status == self._STARTED
c2 = type(self)._active_instance is self
assert c1 == c2
return c1
def _register_callback(self, f, args=()):
assert self._active
assert isinstance(args, (tuple, list))
proxies = self._make_proxies(args)
self._forward(args, proxies, checkpoint=True)
# NOTE: under eager graph callback will fire immediately
job = mgb.opr.callback_injector(
self._insert_barrier(_dummy()), lambda _: f(*proxies)
)
self._insert_checkpoint(job)
self._outspec.append(job)
def _insert_barrier(self, x):
assert self._active
if self._checkpoint is None:
return x
if isinstance(x, Tensor):
x = x._symvar
wrap = True
else:
wrap = False
if not isinstance(x, mgb.SymbolVar):
raise TypeError
x = mgb.opr.virtual_dep([x, self._checkpoint])
if wrap:
x = Tensor(x)
return x
def _insert_checkpoint(self, *args, no_barrier=False):
assert self._active
if not args:
return
args = tuple(x._symvar if isinstance(x, Tensor) else x for x in args)
for x in args:
if not isinstance(x, mgb.SymbolVar):
raise TypeError
if not no_barrier and self._checkpoint is not None:
# normally no need to _insert_barrier here, but if
# someone forgets to call _insert_barrier beforehand,
# this can make things less broken
args += (self._checkpoint,)
if len(args) == 1:
self._checkpoint = args[0]
else:
self._checkpoint = mgb.opr.virtual_dep(args)
def _mark_impure(self, x):
assert self._active
ret = x
if isinstance(x, Tensor):
x = x._symvar
if not isinstance(x, mgb.SymbolVar):
raise TypeError
self._outspec.append(x)
self._insert_checkpoint(x)
return ret
def _make_proxies(self, args):
assert isinstance(args, (tuple, list))
for x in args:
assert isinstance(x, Tensor)
return tuple(tensor(dtype=x.dtype, device=x.device) for x in args)
def _forward(self, srcs, dests, checkpoint=True):
# pseudo-op: does not run under static graph; traced
# TODO: use shared memory
assert len(srcs) == len(dests)
if not self._active:
for s, d in zip(srcs, dests):
d.set_value(s, share=False)
return
jobs = []
for s, d in zip(srcs, dests):
def callback(value, dest=d):
dest.set_value(value, share=False)
s = self._insert_barrier(s._symvar)
# NOTE: callbacks fire immediately in an eager graph
jobs.append(mgb.opr.callback_injector(s, callback))
self._outspec.extend(jobs)
if checkpoint:
self._insert_checkpoint(*jobs, no_barrier=True)
def _forward_inputs(self, *args, **kwargs):
if self._kwargs is None:
self._kwargs = kwargs
elif self._kwargs != kwargs:
raise ValueError("kwargs must not change between invocations")
if self._args is None:
self._args = []
for i in args:
if isinstance(i, Tensor):
self._args.append(tensor(dtype=i.dtype, device=i.device))
self._args[-1].set_value(i, share=False)
else:
self._args.append(tensor(i))
else:
if not len(args) == len(self._args):
raise TypeError
for i, proxy in zip(args, self._args):
proxy.set_value(i, share=False)
# XXX: sync?
def _make_outputs(self, outputs):
if outputs is None:
self._outputs = None
return
if isinstance(outputs, Tensor):
# no one is able to call barrier after this, so no need to checkpoint
# but a checkpoint does little harm anyway
(self._outputs,) = self._make_proxies([outputs])
return
if not isinstance(outputs, (tuple, list)):
raise TypeError("should return (tuple of) tensor")
for i in outputs:
if not isinstance(i, Tensor):
raise TypeError("should return (tuple of) tensor")
self._outputs = self._make_proxies(outputs)
def _foward_outputs(self, outputs):
# pseudo-op: does not run under static graph; traced
if self._outputs is unset:
self._make_outputs(outputs)
if self._outputs is None:
if outputs is not None:
raise TypeError("should return None")
elif isinstance(self._outputs, Tensor):
if not isinstance(outputs, Tensor):
raise TypeError("should return a tensor")
self._forward([outputs], [self._outputs])
else:
assert isinstance(self._outputs, tuple)
def check():
if not isinstance(outputs, (tuple, list)):
return False
if len(self._outputs) != len(outputs):
return False
for x in outputs:
if not isinstance(x, Tensor):
return False
return True
if not check():
raise TypeError(
"should return tuple of %d tensors" % len(self._outputs)
)
self._forward(outputs, self._outputs)
def _apply_graph_options(self, cg):
# graph opt level
if self._graph_opt_level is not None:
cg.set_option("graph_opt_level", self._graph_opt_level)
# log level
if self._log_level is not None:
cg.set_option("log_level", self._log_level)
# sublinear
if self._sublinear_memory_config is not None:
cg.set_option("enable_sublinear_memory_opt", True)
cg.set_option(
"sublinear_mem_cofig.lb_memory",
self._sublinear_memory_config.lb_memory,
)
cg.set_option(
"sublinear_mem_cofig.genetic_nr_iter",
self._sublinear_memory_config.genetic_nr_iter,
)
cg.set_option(
"sublinear_mem_cofig.genetic_pool_size",
self._sublinear_memory_config.genetic_pool_size,
)
cg.set_option(
"sublinear_mem_cofig.thresh_nr_try",
self._sublinear_memory_config.thresh_nr_try,
)
cg.set_option(
"sublinear_mem_cofig.num_worker",
self._sublinear_memory_config.num_worker,
)
# pack allreduce
if self._allreduce_pack_max_size is not None:
cg.set_option("allreduce_pack_max_size", self._allreduce_pack_max_size)
# profile
if self._profiling:
self._profiler = | CompGraphProfiler(cg) | megengine._internal.plugin.CompGraphProfiler |
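When a trace is constructed with `profiling=True`, `_apply_graph_options` attaches a `CompGraphProfiler` to the compiled graph. A hedged sketch of enabling it (how the collected profile is read back is not shown in this row, so no accessor is assumed):

```python
import numpy as np
from megengine import jit, tensor

@jit.trace(symbolic=True, profiling=True)
def scaled(x):
    return x * 0.5

scaled(tensor(np.ones((4,), dtype=np.float32)))  # the profiler records this compiled run
```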
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import functools
import itertools
import os
from typing import Callable, Tuple, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal.plugin import CompGraphProfiler
from ..core import Tensor, graph, tensor
from .sublinear_memory_config import SublinearMemoryConfig
def sideeffect(f):
# during eager tracing, wrapped function is called with proxy inputs
# during static tracing, wrapped function will not be called at all
@functools.wraps(f)
def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements
if not trace._active_instance:
return f(*args, **kwargs)
tensors = {}
for i, x in itertools.chain(enumerate(args), kwargs.items()):
if isinstance(x, Tensor):
tensors[i] = x
if tensors:
_keys, tensors = zip(*tensors.items())
else:
_keys, tensors = (), ()
def callback(*tensors, f=f, keys=_keys, args=args, kwargs=kwargs):
replace = dict(zip(keys, tensors))
args = tuple(replace.get(i, x) for i, x in enumerate(args))
kwargs = {i: replace.get(i, x) for i, x in kwargs.items()}
if f(*args, **kwargs) is not None:
raise TypeError("a sideeffect function should return None")
# TODO: clear memory
trace._active_instance._register_callback(callback, tensors)
return wrapper
def mark_impure(x):
if not trace._active_instance:
return x
return trace._active_instance._mark_impure(x)
def barrier(x):
if not trace._active_instance:
return x
return trace._active_instance._insert_barrier(x)
def _dummy():
return mgb.make_immutable(*graph._use_default_if_none(None, None), 0)
class unset:
pass
class trace:
"""
Wrap a callable and provide:
* tracing via :meth:`.trace` and :meth:`.dump`
* accelerated evaluation via :meth:`.__call__`
:param func: Positional only argument.
:param symbolic: Whether to use symbolic tensor. Default: False
:param opt_level: Optimization level for compiling trace.
:param log_level: Log level.
:param sublinear_memory_config: Configuration for sublinear memory optimization.
If not None, it enables sublinear memory optimization with given setting.
:param allreduce_pack_max_size: Maximum size of an allreduce pack in MB.
If not None, multiple gradients will be packed and synchronized together
:param profiling: Whether to profile compiled trace. Default: False
"""
_active_instance = None
enabled = not os.getenv("MGE_DISABLE_TRACE")
_UNSTARTED = "unstarted"
_STARTED = "started"
_FINISHED = "finished"
def __new__(cls, *args, **kwargs):
if not args:
return functools.partial(cls, **kwargs)
return super().__new__(cls)
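# This lets the class be used both as a bare decorator (``@trace``) and as a
# parameterized one (``@trace(symbolic=True)``): with keyword-only arguments a
# functools.partial is returned and later applied to the function.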
def __init__(
self,
func: Callable[..., Union[None, Tensor, Tuple[Tensor]]],
*,
symbolic: bool = False,
opt_level: int = None,
log_level: int = None,
sublinear_memory_config: SublinearMemoryConfig = None,
allreduce_pack_max_size: int = None,
profiling: bool = False
):
self.__wrapped__ = func
self._symbolic = symbolic
self._graph_opt_level = opt_level
self._log_level = log_level
self._sublinear_memory_config = sublinear_memory_config
self._allreduce_pack_max_size = allreduce_pack_max_size
self._status = self._UNSTARTED
self._args = None
self._kwargs = None
self._outputs = unset
self._sym_outputs = unset
self._outspec = None
self._checkpoint = None
self._compiled_func = None
self._profiling = profiling
self._profiler = None
@property
def _active(self):
c1 = self._status == self._STARTED
c2 = type(self)._active_instance is self
assert c1 == c2
return c1
def _register_callback(self, f, args=()):
assert self._active
assert isinstance(args, (tuple, list))
proxies = self._make_proxies(args)
self._forward(args, proxies, checkpoint=True)
# NOTE: under eager graph callback will fire immediately
job = mgb.opr.callback_injector(
self._insert_barrier(_dummy()), lambda _: f(*proxies)
)
self._insert_checkpoint(job)
self._outspec.append(job)
def _insert_barrier(self, x):
assert self._active
if self._checkpoint is None:
return x
if isinstance(x, Tensor):
x = x._symvar
wrap = True
else:
wrap = False
if not isinstance(x, mgb.SymbolVar):
raise TypeError
x = mgb.opr.virtual_dep([x, self._checkpoint])
if wrap:
x = Tensor(x)
return x
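# The barrier adds a fake data dependency (virtual_dep) on the most recent
# checkpoint, so side-effect callbacks keep their program order once the
# traced graph is compiled.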
def _insert_checkpoint(self, *args, no_barrier=False):
assert self._active
if not args:
return
args = tuple(x._symvar if isinstance(x, Tensor) else x for x in args)
for x in args:
if not isinstance(x, mgb.SymbolVar):
raise TypeError
if not no_barrier and self._checkpoint is not None:
# normally no need to _insert_barrier here, but if
# someone forgets to call _insert_barrier beforehand,
# this can make things less broken
args += (self._checkpoint,)
if len(args) == 1:
self._checkpoint = args[0]
else:
self._checkpoint = mgb.opr.virtual_dep(args)
def _mark_impure(self, x):
assert self._active
ret = x
if isinstance(x, Tensor):
x = x._symvar
if not isinstance(x, mgb.SymbolVar):
raise TypeError
self._outspec.append(x)
self._insert_checkpoint(x)
return ret
def _make_proxies(self, args):
assert isinstance(args, (tuple, list))
for x in args:
assert isinstance(x, Tensor)
return tuple(tensor(dtype=x.dtype, device=x.device) for x in args)
def _forward(self, srcs, dests, checkpoint=True):
# pseudo-op: does not run under static graph; traced
# TODO: use shared memory
assert len(srcs) == len(dests)
if not self._active:
for s, d in zip(srcs, dests):
d.set_value(s, share=False)
return
jobs = []
for s, d in zip(srcs, dests):
def callback(value, dest=d):
dest.set_value(value, share=False)
s = self._insert_barrier(s._symvar)
# NOTE: the callback fires immediately in an eager graph
jobs.append(mgb.opr.callback_injector(s, callback))
self._outspec.extend(jobs)
if checkpoint:
self._insert_checkpoint(*jobs, no_barrier=True)
def _forward_inputs(self, *args, **kwargs):
if self._kwargs is None:
self._kwargs = kwargs
elif self._kwargs != kwargs:
raise ValueError("kwargs must not change between invocations")
if self._args is None:
self._args = []
for i in args:
if isinstance(i, Tensor):
self._args.append(tensor(dtype=i.dtype, device=i.device))
self._args[-1].set_value(i, share=False)
else:
self._args.append(tensor(i))
else:
if not len(args) == len(self._args):
raise TypeError
for i, proxy in zip(args, self._args):
proxy.set_value(i, share=False)
# XXX: sync?
def _make_outputs(self, outputs):
if outputs is None:
self._outputs = None
return
if isinstance(outputs, Tensor):
# no one is able to call barrier after this, so no need to checkpoint
# but a checkpoint does little harm anyway
(self._outputs,) = self._make_proxies([outputs])
return
if not isinstance(outputs, (tuple, list)):
raise TypeError("should return (tuple of) tensor")
for i in outputs:
if not isinstance(i, Tensor):
raise TypeError("should return (tuple of) tensor")
self._outputs = self._make_proxies(outputs)
def _foward_outputs(self, outputs):
# pseudo-op: does not run under static graph; traced
if self._outputs is unset:
self._make_outputs(outputs)
if self._outputs is None:
if outputs is not None:
raise TypeError("should return None")
elif isinstance(self._outputs, Tensor):
if not isinstance(outputs, Tensor):
raise TypeError("should return a tensor")
self._forward([outputs], [self._outputs])
else:
assert isinstance(self._outputs, tuple)
def check():
if not isinstance(outputs, (tuple, list)):
return False
if len(self._outputs) != len(outputs):
return False
for x in outputs:
if not isinstance(x, Tensor):
return False
return True
if not check():
raise TypeError(
"should return tuple of %d tensors" % len(self._outputs)
)
self._forward(outputs, self._outputs)
def _apply_graph_options(self, cg):
# graph opt level
if self._graph_opt_level is not None:
cg.set_option("graph_opt_level", self._graph_opt_level)
# log level
if self._log_level is not None:
cg.set_option("log_level", self._log_level)
# sublinear
if self._sublinear_memory_config is not None:
cg.set_option("enable_sublinear_memory_opt", True)
cg.set_option(
"sublinear_mem_cofig.lb_memory",
self._sublinear_memory_config.lb_memory,
)
cg.set_option(
"sublinear_mem_cofig.genetic_nr_iter",
self._sublinear_memory_config.genetic_nr_iter,
)
cg.set_option(
"sublinear_mem_cofig.genetic_pool_size",
self._sublinear_memory_config.genetic_pool_size,
)
cg.set_option(
"sublinear_mem_cofig.thresh_nr_try",
self._sublinear_memory_config.thresh_nr_try,
)
cg.set_option(
"sublinear_mem_cofig.num_worker",
self._sublinear_memory_config.num_worker,
)
# pack allreduce
if self._allreduce_pack_max_size is not None:
cg.set_option("allreduce_pack_max_size", self._allreduce_pack_max_size)
# profile
if self._profiling:
self._profiler = CompGraphProfiler(cg)
def _get_graph(self, eager):
if eager:
if not hasattr(self, "_eager_graph"):
# pylint: disable=attribute-defined-outside-init
self._eager_graph = graph.Graph(eager_evaluation=True)
self._apply_graph_options(self._eager_graph)
return self._eager_graph
else:
if not hasattr(self, "_static_graph"):
# pylint: disable=attribute-defined-outside-init
self._static_graph = graph.Graph(eager_evaluation=False)
self._apply_graph_options(self._static_graph)
return self._static_graph
@contextlib.contextmanager
def _prepare(self, args, kwargs, enable):
# prepare for execution
self._forward_inputs(*args, **kwargs)
if not enable:
# XXX: use our own graph here?
cg = None
elif self._status == self._FINISHED:
cg = None
elif self._symbolic:
cg = self._get_graph(eager=False)
else:
cg = self._get_graph(eager=True)
try:
# NOTE: always trace in a new graph, so capturing an undetached tensor
# will never work (would work if tracing in default graph)
if cg is None:
yield
else:
with cg:
yield
finally:
# XXX: properly release memory
if cg:
cg.clear_device_memory()
@contextlib.contextmanager
def _activate(self):
# prepare for tracing
if self._status != self._UNSTARTED:
raise RuntimeError("cannot trace a second time")
if type(self)._active_instance is not None:
raise RuntimeError("nested trace is unsupported")
self._status = self._STARTED
type(self)._active_instance = self
self._user_cache = {}
try:
yield
finally:
self._status = self._FINISHED
self._user_cache = None
type(self)._active_instance = None
def _run_wrapped(self):
outputs = self.__wrapped__(*self._args, **self._kwargs)
self._foward_outputs(outputs)
return outputs
def _do_trace(self):
with self._activate():
self._outspec = []
outputs = self._run_wrapped()
if outputs is None:
self._sym_outputs = None
else:
if isinstance(outputs, Tensor):
outputs = [outputs]
# _run_wrapped has checked validity of outputs
self._sym_outputs = tuple(i._symvar for i in outputs)
| mgb.comp_graph_tools.set_priority_to_id(self._outspec) | megengine._internal.comp_graph_tools.set_priority_to_id |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import functools
import itertools
import os
from typing import Callable, Tuple, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal.plugin import CompGraphProfiler
from ..core import Tensor, graph, tensor
from .sublinear_memory_config import SublinearMemoryConfig
def sideeffect(f):
# during eager tracing, wrapped function is called with proxy inputs
# during static tracing, wrapped function will not be called at all
@functools.wraps(f)
def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements
if not trace._active_instance:
return f(*args, **kwargs)
tensors = {}
for i, x in itertools.chain(enumerate(args), kwargs.items()):
if isinstance(x, Tensor):
tensors[i] = x
if tensors:
_keys, tensors = zip(*tensors.items())
else:
_keys, tensors = (), ()
def callback(*tensors, f=f, keys=_keys, args=args, kwargs=kwargs):
replace = dict(zip(keys, tensors))
args = tuple(replace.get(i, x) for i, x in enumerate(args))
kwargs = {i: replace.get(i, x) for i, x in kwargs.items()}
if f(*args, **kwargs) is not None:
raise TypeError("a sideeffect function should return None")
# TODO: clear memory
trace._active_instance._register_callback(callback, tensors)
return wrapper
def mark_impure(x):
if not trace._active_instance:
return x
return trace._active_instance._mark_impure(x)
def barrier(x):
if not trace._active_instance:
return x
return trace._active_instance._insert_barrier(x)
def _dummy():
return mgb.make_immutable(*graph._use_default_if_none(None, None), 0)
class unset:
pass
class trace:
"""
Wrap a callable and provide:
* tracing via :meth:`.trace` and :meth:`.dump`
* accelerated evaluation via :meth:`.__call__`
:param func: Positional only argument.
:param symbolic: Whether to use symbolic tensor. Default: False
:param opt_level: Optimization level for compiling trace.
:param log_level: Log level.
:param sublinear_memory_config: Configuration for sublinear memory optimization.
If not None, it enables sublinear memory optimization with given setting.
:param allreduce_pack_max_size: Maximum size of an allreduce pack in MB.
If not None, multiple gradients will be packed and synchronized together
:param profiling: Whether to profile compiled trace. Default: False
"""
_active_instance = None
enabled = not os.getenv("MGE_DISABLE_TRACE")
_UNSTARTED = "unstarted"
_STARTED = "started"
_FINISHED = "finished"
def __new__(cls, *args, **kwargs):
if not args:
return functools.partial(cls, **kwargs)
return super().__new__(cls)
def __init__(
self,
func: Callable[..., Union[None, Tensor, Tuple[Tensor]]],
*,
symbolic: bool = False,
opt_level: int = None,
log_level: int = None,
sublinear_memory_config: SublinearMemoryConfig = None,
allreduce_pack_max_size: int = None,
profiling: bool = False
):
self.__wrapped__ = func
self._symbolic = symbolic
self._graph_opt_level = opt_level
self._log_level = log_level
self._sublinear_memory_config = sublinear_memory_config
self._allreduce_pack_max_size = allreduce_pack_max_size
self._status = self._UNSTARTED
self._args = None
self._kwargs = None
self._outputs = unset
self._sym_outputs = unset
self._outspec = None
self._checkpoint = None
self._compiled_func = None
self._profiling = profiling
self._profiler = None
@property
def _active(self):
c1 = self._status == self._STARTED
c2 = type(self)._active_instance is self
assert c1 == c2
return c1
def _register_callback(self, f, args=()):
assert self._active
assert isinstance(args, (tuple, list))
proxies = self._make_proxies(args)
self._forward(args, proxies, checkpoint=True)
# NOTE: under eager graph callback will fire immediately
job = mgb.opr.callback_injector(
self._insert_barrier(_dummy()), lambda _: f(*proxies)
)
self._insert_checkpoint(job)
self._outspec.append(job)
def _insert_barrier(self, x):
assert self._active
if self._checkpoint is None:
return x
if isinstance(x, Tensor):
x = x._symvar
wrap = True
else:
wrap = False
if not isinstance(x, mgb.SymbolVar):
raise TypeError
x = mgb.opr.virtual_dep([x, self._checkpoint])
if wrap:
x = Tensor(x)
return x
def _insert_checkpoint(self, *args, no_barrier=False):
assert self._active
if not args:
return
args = tuple(x._symvar if isinstance(x, Tensor) else x for x in args)
for x in args:
if not isinstance(x, mgb.SymbolVar):
raise TypeError
if not no_barrier and self._checkpoint is not None:
# normally no need to _insert_barrier here, but if
# someone forgets to call _insert_barrier beforehand,
# this can make things less broken
args += (self._checkpoint,)
if len(args) == 1:
self._checkpoint = args[0]
else:
self._checkpoint = mgb.opr.virtual_dep(args)
def _mark_impure(self, x):
assert self._active
ret = x
if isinstance(x, Tensor):
x = x._symvar
if not isinstance(x, mgb.SymbolVar):
raise TypeError
self._outspec.append(x)
self._insert_checkpoint(x)
return ret
def _make_proxies(self, args):
assert isinstance(args, (tuple, list))
for x in args:
assert isinstance(x, Tensor)
return tuple(tensor(dtype=x.dtype, device=x.device) for x in args)
def _forward(self, srcs, dests, checkpoint=True):
# pseudo-op: does not run under static graph; traced
# TODO: use shared memory
assert len(srcs) == len(dests)
if not self._active:
for s, d in zip(srcs, dests):
d.set_value(s, share=False)
return
jobs = []
for s, d in zip(srcs, dests):
def callback(value, dest=d):
dest.set_value(value, share=False)
s = self._insert_barrier(s._symvar)
# NOTE: the callback fires immediately in an eager graph
jobs.append( | mgb.opr.callback_injector(s, callback) | megengine._internal.opr.callback_injector |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import contextlib
import functools
import itertools
import os
from typing import Callable, Tuple, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal.plugin import CompGraphProfiler
from ..core import Tensor, graph, tensor
from .sublinear_memory_config import SublinearMemoryConfig
def sideeffect(f):
# during eager tracing, wrapped function is called with proxy inputs
# during static tracing, wrapped function will not be called at all
@functools.wraps(f)
def wrapper(*args, **kwargs): # pylint: disable=inconsistent-return-statements
if not trace._active_instance:
return f(*args, **kwargs)
tensors = {}
for i, x in itertools.chain(enumerate(args), kwargs.items()):
if isinstance(x, Tensor):
tensors[i] = x
if tensors:
_keys, tensors = zip(*tensors.items())
else:
_keys, tensors = (), ()
def callback(*tensors, f=f, keys=_keys, args=args, kwargs=kwargs):
replace = dict(zip(keys, tensors))
args = tuple(replace.get(i, x) for i, x in enumerate(args))
kwargs = {i: replace.get(i, x) for i, x in kwargs.items()}
if f(*args, **kwargs) is not None:
raise TypeError("a sideeffect function should return None")
# TODO: clear memory
trace._active_instance._register_callback(callback, tensors)
return wrapper
def mark_impure(x):
if not trace._active_instance:
return x
return trace._active_instance._mark_impure(x)
def barrier(x):
if not trace._active_instance:
return x
return trace._active_instance._insert_barrier(x)
def _dummy():
return mgb.make_immutable(*graph._use_default_if_none(None, None), 0)
class unset:
pass
class trace:
"""
Wrap a callable and provide:
* tracing via :meth:`.trace` and :meth:`.dump`
* accelerated evaluation via :meth:`.__call__`
:param func: Positional only argument.
:param symbolic: Whether to use symbolic tensor. Default: False
:param opt_level: Optimization level for compiling trace.
:param log_level: Log level.
:param sublinear_memory_config: Configuration for sublinear memory optimization.
If not None, it enables sublinear memory optimization with given setting.
:param allreduce_pack_max_size: Maximum size of an allreduce pack in MB.
If not None, multiple gradients will be packed and synchronized together
:param profiling: Whether to profile compiled trace. Default: False
"""
_active_instance = None
enabled = not os.getenv("MGE_DISABLE_TRACE")
_UNSTARTED = "unstarted"
_STARTED = "started"
_FINISHED = "finished"
def __new__(cls, *args, **kwargs):
if not args:
return functools.partial(cls, **kwargs)
return super().__new__(cls)
def __init__(
self,
func: Callable[..., Union[None, Tensor, Tuple[Tensor]]],
*,
symbolic: bool = False,
opt_level: int = None,
log_level: int = None,
sublinear_memory_config: SublinearMemoryConfig = None,
allreduce_pack_max_size: int = None,
profiling: bool = False
):
self.__wrapped__ = func
self._symbolic = symbolic
self._graph_opt_level = opt_level
self._log_level = log_level
self._sublinear_memory_config = sublinear_memory_config
self._allreduce_pack_max_size = allreduce_pack_max_size
self._status = self._UNSTARTED
self._args = None
self._kwargs = None
self._outputs = unset
self._sym_outputs = unset
self._outspec = None
self._checkpoint = None
self._compiled_func = None
self._profiling = profiling
self._profiler = None
@property
def _active(self):
c1 = self._status == self._STARTED
c2 = type(self)._active_instance is self
assert c1 == c2
return c1
def _register_callback(self, f, args=()):
assert self._active
assert isinstance(args, (tuple, list))
proxies = self._make_proxies(args)
self._forward(args, proxies, checkpoint=True)
# NOTE: under eager graph callback will fire immediately
job = mgb.opr.callback_injector(
self._insert_barrier(_dummy()), lambda _: f(*proxies)
)
self._insert_checkpoint(job)
self._outspec.append(job)
def _insert_barrier(self, x):
assert self._active
if self._checkpoint is None:
return x
if isinstance(x, Tensor):
x = x._symvar
wrap = True
else:
wrap = False
if not isinstance(x, mgb.SymbolVar):
raise TypeError
x = mgb.opr.virtual_dep([x, self._checkpoint])
if wrap:
x = Tensor(x)
return x
def _insert_checkpoint(self, *args, no_barrier=False):
assert self._active
if not args:
return
args = tuple(x._symvar if isinstance(x, Tensor) else x for x in args)
for x in args:
if not isinstance(x, mgb.SymbolVar):
raise TypeError
if not no_barrier and self._checkpoint is not None:
# normally no need to _insert_barrier here, but if
# someone forgets to call _insert_barrier beforehand,
# this can make things less broken
args += (self._checkpoint,)
if len(args) == 1:
self._checkpoint = args[0]
else:
self._checkpoint = mgb.opr.virtual_dep(args)
def _mark_impure(self, x):
assert self._active
ret = x
if isinstance(x, Tensor):
x = x._symvar
if not isinstance(x, mgb.SymbolVar):
raise TypeError
self._outspec.append(x)
self._insert_checkpoint(x)
return ret
def _make_proxies(self, args):
assert isinstance(args, (tuple, list))
for x in args:
assert isinstance(x, Tensor)
return tuple(tensor(dtype=x.dtype, device=x.device) for x in args)
def _forward(self, srcs, dests, checkpoint=True):
# pseudo-op: does not run under static graph; traced
# TODO: use shared memory
assert len(srcs) == len(dests)
if not self._active:
for s, d in zip(srcs, dests):
d.set_value(s, share=False)
return
jobs = []
for s, d in zip(srcs, dests):
def callback(value, dest=d):
dest.set_value(value, share=False)
s = self._insert_barrier(s._symvar)
# NOTE: the callback fires immediately in an eager graph
jobs.append(mgb.opr.callback_injector(s, callback))
self._outspec.extend(jobs)
if checkpoint:
self._insert_checkpoint(*jobs, no_barrier=True)
def _forward_inputs(self, *args, **kwargs):
if self._kwargs is None:
self._kwargs = kwargs
elif self._kwargs != kwargs:
raise ValueError("kwargs must not change between invocations")
if self._args is None:
self._args = []
for i in args:
if isinstance(i, Tensor):
self._args.append(tensor(dtype=i.dtype, device=i.device))
self._args[-1].set_value(i, share=False)
else:
self._args.append(tensor(i))
else:
if not len(args) == len(self._args):
raise TypeError
for i, proxy in zip(args, self._args):
proxy.set_value(i, share=False)
# XXX: sync?
def _make_outputs(self, outputs):
if outputs is None:
self._outputs = None
return
if isinstance(outputs, Tensor):
# no one is able to call barrier after this, so no need to checkpoint
# but a checkpoint does little harm anyway
(self._outputs,) = self._make_proxies([outputs])
return
if not isinstance(outputs, (tuple, list)):
raise TypeError("should return (tuple of) tensor")
for i in outputs:
if not isinstance(i, Tensor):
raise TypeError("should return (tuple of) tensor")
self._outputs = self._make_proxies(outputs)
def _foward_outputs(self, outputs):
# pseudo-op: does not run under static graph; traced
if self._outputs is unset:
self._make_outputs(outputs)
if self._outputs is None:
if outputs is not None:
raise TypeError("should return None")
elif isinstance(self._outputs, Tensor):
if not isinstance(outputs, Tensor):
raise TypeError("should return a tensor")
self._forward([outputs], [self._outputs])
else:
assert isinstance(self._outputs, tuple)
def check():
if not isinstance(outputs, (tuple, list)):
return False
if len(self._outputs) != len(outputs):
return False
for x in outputs:
if not isinstance(x, Tensor):
return False
return True
if not check():
raise TypeError(
"should return tuple of %d tensors" % len(self._outputs)
)
self._forward(outputs, self._outputs)
def _apply_graph_options(self, cg):
# graph opt level
if self._graph_opt_level is not None:
cg.set_option("graph_opt_level", self._graph_opt_level)
# log level
if self._log_level is not None:
cg.set_option("log_level", self._log_level)
# sublinear
if self._sublinear_memory_config is not None:
cg.set_option("enable_sublinear_memory_opt", True)
cg.set_option(
"sublinear_mem_cofig.lb_memory",
self._sublinear_memory_config.lb_memory,
)
cg.set_option(
"sublinear_mem_cofig.genetic_nr_iter",
self._sublinear_memory_config.genetic_nr_iter,
)
cg.set_option(
"sublinear_mem_cofig.genetic_pool_size",
self._sublinear_memory_config.genetic_pool_size,
)
cg.set_option(
"sublinear_mem_cofig.thresh_nr_try",
self._sublinear_memory_config.thresh_nr_try,
)
cg.set_option(
"sublinear_mem_cofig.num_worker",
self._sublinear_memory_config.num_worker,
)
# pack allreduce
if self._allreduce_pack_max_size is not None:
cg.set_option("allreduce_pack_max_size", self._allreduce_pack_max_size)
# profile
if self._profiling:
self._profiler = CompGraphProfiler(cg)
def _get_graph(self, eager):
if eager:
if not hasattr(self, "_eager_graph"):
# pylint: disable=attribute-defined-outside-init
self._eager_graph = graph.Graph(eager_evaluation=True)
self._apply_graph_options(self._eager_graph)
return self._eager_graph
else:
if not hasattr(self, "_static_graph"):
# pylint: disable=attribute-defined-outside-init
self._static_graph = graph.Graph(eager_evaluation=False)
self._apply_graph_options(self._static_graph)
return self._static_graph
@contextlib.contextmanager
def _prepare(self, args, kwargs, enable):
# prepare for execution
self._forward_inputs(*args, **kwargs)
if not enable:
# XXX: use our own graph here?
cg = None
elif self._status == self._FINISHED:
cg = None
elif self._symbolic:
cg = self._get_graph(eager=False)
else:
cg = self._get_graph(eager=True)
try:
# NOTE: always trace in a new graph, so capturing an undetached tensor
# will never work (would work if tracing in default graph)
if cg is None:
yield
else:
with cg:
yield
finally:
# XXX: properly release memory
if cg:
cg.clear_device_memory()
@contextlib.contextmanager
def _activate(self):
# prepare for tracing
if self._status != self._UNSTARTED:
raise RuntimeError("cannot trace a second time")
if type(self)._active_instance is not None:
raise RuntimeError("nested trace is unsupported")
self._status = self._STARTED
type(self)._active_instance = self
self._user_cache = {}
try:
yield
finally:
self._status = self._FINISHED
self._user_cache = None
type(self)._active_instance = None
def _run_wrapped(self):
outputs = self.__wrapped__(*self._args, **self._kwargs)
self._foward_outputs(outputs)
return outputs
def _do_trace(self):
with self._activate():
self._outspec = []
outputs = self._run_wrapped()
if outputs is None:
self._sym_outputs = None
else:
if isinstance(outputs, Tensor):
outputs = [outputs]
# _run_wrapped has checked validity of outputs
self._sym_outputs = tuple(i._symvar for i in outputs)
mgb.comp_graph_tools.set_priority_to_id(self._outspec)
self._compiled_func = graph.get_default_graph().compile(None, self._outspec)
def trace(self, *args: Tensor, **kwargs):
"""
Trace wrapped callable with provided arguments.
"""
with self._prepare(args, kwargs, enable=True):
self._do_trace()
return self
def __call__(self, *args: Tensor, **kwargs):
"""
Evaluate on provided arguments, using compiled trace
instead of the original callable if applicable.
:return: ``None`` or :class:`~.Tensor` or tuple of :class:`~.Tensor`, depending on the
return value of wrapped callable.
"""
with self._prepare(args, kwargs, enable=self.enabled):
if not self.enabled:
self._run_wrapped()
elif self._status == self._FINISHED:
self._compiled_func()
else:
if self._status == self._UNSTARTED:
self._do_trace()
if self._symbolic:
self._compiled_func()
return self._outputs
def dump(
self,
fpath,
*,
arg_names=None,
append=False,
optimize_for_inference=False,
**kwargs
):
"""
Serialize trace to file system.
:param fpath: positional only argument. Path of output file.
:param arg_names: names of the input tensors in the traced function.
:param append: whether output is appended to ``fpath``.
:param optimize_for_inference: whether to enable optimize_for_inference
pass before dump.
:param enable_io16xc32: whether to use float16 for I/O between oprs and use
float32 as internal computation precision. Note the output var would be
changed to float16.
:param enable_ioc16: whether to use float16 for both I/O and computation
precision.
:param enable_hwcd4: whether to use NHWCD4 data layout. This is faster on some
OpenCL backends.
:param enable_nchw88: whether to use NCHW88 data layout. It is currently
used in the x86 AVX backend.
:param enable_nchw44: whether to use NCHW44 data layout. It is currently
used in the ARM backend.
:param enable_nchw44_dot: whether to use NCHW44_dot data layout. It is currently
used in the ARMv8.2+dotprod backend.
:param enable_nchw4: whether to use NCHW4 data layout. It is currently
used in the NVIDIA backend (based on cuDNN).
:param enable_nchw32: whether to use NCHW32 data layout. It is currently
used in the NVIDIA backend with TensorCore (based on cuDNN).
:param enable_chwn4: whether to use CHWN4 data layout. It is currently
used in the NVIDIA backend with TensorCore.
:param enable_fuse_conv_bias_nonlinearity: whether to fuse conv+bias+nonlinearity
into one opr.
:param enable_fuse_conv_bias_with_z: whether to fuse conv_bias with a z
input for inference on the NVIDIA backend (this optimization pass will
cause the precision of the output to differ between training and
inference).
"""
if self._status != self._FINISHED:
raise ValueError("not traced")
assert isinstance(self._sym_outputs, (tuple, type(None)))
if not self._sym_outputs:
raise ValueError("not outputs")
if arg_names is None:
arg_names = ["arg_%d" % i for i in range(len(self._args))]
elif len(arg_names) != len(self._args):
raise ValueError(
"len(arg_names) should be {}, got {}".format(
len(self._args), len(arg_names)
)
)
optimize_for_inference_args_map = {
"enable_io16xc32": "f16_io_f32_comp",
"enable_ioc16": "f16_io_comp",
"enable_hwcd4": "use_nhwcd4",
"enable_nchw4": "use_nchw4",
"enable_nchw88": "use_nchw88",
"enable_nchw32": "use_nchw32",
"enable_nchw44": "use_nchw44",
"enable_nchw44_dot": "use_nchw44_dot",
"enable_chwn4": "use_chwn4",
"enable_fuse_conv_bias_nonlinearity": "fuse_conv_bias_nonlinearity",
"enable_fuse_conv_bias_with_z": "fuse_conv_bias_with_z",
}
if optimize_for_inference:
optimize_for_inference_kwargs = {}
for k, v in optimize_for_inference_args_map.items():
if kwargs.pop(k, False):
optimize_for_inference_kwargs[v] = True
else:
for k in optimize_for_inference_args_map:
if kwargs.get(k, False):
raise ValueError(
"cannot set %s when optimize_for_inference is not set" % k
)
if kwargs:
raise ValueError("unknown options: %s" % list(kwargs))
cg = self._sym_outputs[0].owner_graph
replace = {}
for t, name in zip(self._args, arg_names):
# relies on symvar dedup
s = t.__mgb_symvar__(comp_graph=cg)
replace[s] = mgb.make_arg(
t.device, cg, dtype=t.dtype, shape=t.shape, name=name
)
# Convert VolatileSharedDeviceTensor to SharedDeviceTensor,
# otherwise some optimizations would not work. The conversion is
# safe because there simply is no way (using builtin ops) to make
# a VolatileSharedDeviceTensor actually volatile.
for s in mgb.cgtools.get_dep_vars(
self._sym_outputs, "VolatileSharedDeviceTensor"
):
if s in replace:
continue # is an input
replace[s] = | mgb.SharedND._from_symvar(s) | megengine._internal.SharedND._from_symvar |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
| dist.init_process_group("localhost", 0, world_size, rank, dev, backend) | megengine.distributed.init_process_group |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
| dist.init_process_group("localhost", port, world_size, rank, dev, backend) | megengine.distributed.init_process_group |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | tensor(data) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = | dist.functional.reduce_sum(inp) | megengine.distributed.functional.reduce_sum |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
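# Only the root (rank 0) receives the reduced tensor; the other ranks get a
# zero-filled placeholder, which is what the two assert branches above check.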
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | tensor(data) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = | dist.functional.gather(inp) | megengine.distributed.functional.gather |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | tensor(data) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = | dist.functional.broadcast(inp) | megengine.distributed.functional.broadcast |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | tensor(data) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = | dist.functional.scatter(inp) | megengine.distributed.functional.scatter |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
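        # gather: rank 0 receives all ranks' tensors concatenated along axis 0;
        # non-root ranks get a zero-filled tensor back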
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
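        # broadcast: rank 0's tensor is copied to every rank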
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
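        # scatter: rank 0's tensor is split along axis 0 and each rank receives its own slice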
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | tensor(data) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = | dist.functional.all_to_all(inp) | megengine.distributed.functional.all_to_all |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
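        # all_to_all: each rank's tensor is split along axis 0 into per-rank chunks;
        # rank i ends up with chunk i from every rank, concatenated (see a/b in check below)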
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | tensor(data) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = | dist.functional.all_gather(inp) | megengine.distributed.functional.all_gather |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
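        # all_gather: every rank receives the concatenation of all ranks' tensors along axis 0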
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | tensor(data) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = | dist.functional.reduce_scatter_sum(inp) | megengine.distributed.functional.reduce_scatter_sum |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
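        # reduce_scatter_sum: tensors are summed element-wise across ranks, then the sum is
        # split along axis 0 and each rank keeps only its own slice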
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | tensor(data) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
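        # all_reduce_sum: every rank receives the element-wise sum of all ranks' tensors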
output = | dist.functional.all_reduce_sum(inp) | megengine.distributed.functional.all_reduce_sum |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_max():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | tensor(data) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
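# helper: rank 0 initializes the process group as master on an auto-selected port and publishes that port through the queue; other ranks read the port from the queue and join the same group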
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_max():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = | dist.functional.all_reduce_max(inp) | megengine.distributed.functional.all_reduce_max |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
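# helper: rank 0 initializes the process group as master on an auto-selected port and publishes that port through the queue; other ranks read the port from the queue and join the same group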
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_max():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_max(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.maximum(x, y)
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_min():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | tensor(data) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
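# helper: rank 0 initializes the process group as master on an auto-selected port and publishes that port through the queue; other ranks read the port from the queue and join the same group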
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_max():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_max(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.maximum(x, y)
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_min():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = | dist.functional.all_reduce_min(inp) | megengine.distributed.functional.all_reduce_min |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
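# helper: rank 0 initializes the process group as master on an auto-selected port and publishes that port through the queue; other ranks read the port from the queue and join the same group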
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_max():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_max(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.maximum(x, y)
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_min():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_min(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.minimum(x, y)
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_bcast_param():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = | Parameter(data) | megengine.core.Parameter |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
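# helper: rank 0 initializes the process group as master on an auto-selected port and publishes that port through the queue; other ranks read the port from the queue and join the same group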
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_max():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_max(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.maximum(x, y)
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_min():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_min(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.minimum(x, y)
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_bcast_param():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = Parameter(data)
| dist.functional.bcast_param(inp) | megengine.distributed.functional.bcast_param |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put( | dist.get_master_port() | megengine.distributed.get_master_port |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_max():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_max():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_max(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.maximum(x, y)
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_min():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.core import Parameter, tensor
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group("localhost", 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group("localhost", port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect)
else:
assert np.allclose(output.numpy(), 0)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, None, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_broadcast():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.broadcast(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(target=worker, args=(0, x, backend, x, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, x, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_scatter():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.scatter(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = x + 1
p0 = mp.Process(
target=worker, args=(0, x, backend, x[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, x[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_to_all():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_to_all(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
p0 = mp.Process(target=worker, args=(0, x, backend, a, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, b, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (100, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_gather():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_gather(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.concatenate((x, y))
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(
target=worker, args=(0, x, backend, z[: shape[0] // 2], port_queue)
)
p1 = mp.Process(
target=worker, args=(1, y, backend, z[shape[0] // 2 :], port_queue)
)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 4), (8, 10), (88, 44)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_sum():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = x + y
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_max():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_max(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.maximum(x, y)
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_all_reduce_min():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if mge.get_device_count("gpu") < world_size:
return
_init_process_group_wrapper(world_size, rank, rank, backend, port_queue)
inp = tensor(data)
output = dist.functional.all_reduce_min(inp)
assert np.allclose(output.numpy(), expect)
def check(shape, backend):
port_queue = mp.Queue()
x = np.random.rand(*shape).astype("float32")
y = np.random.rand(*shape).astype("float32")
z = np.minimum(x, y)
p0 = mp.Process(target=worker, args=(0, x, backend, z, port_queue))
p1 = mp.Process(target=worker, args=(1, y, backend, z, port_queue))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
for shape in [(2, 3), (8, 10), (99, 77)]:
for backend in ["nccl", "ucx"]:
check(shape, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_bcast_param():
world_size = 2
def worker(rank, data, backend, expect, port_queue):
if | mge.get_device_count("gpu") | megengine.get_device_count |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = | megengine.logger.get_logger() | megengine.logger.get_logger |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
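# Rescale all gradients in place so that their combined norm (of order `ord`) is at
# most max_norm; returns the pre-clipping norm.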
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
| push_scope("clip_grad_norm") | megengine.core._imperative_rt.core2.push_scope |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm(concat(norm_), ord=ord)
else:
norm_ = norm_[0]
scale = max_norm / (norm_ + 1e-6)
scale = | minimum(scale, 1) | megengine.functional.minimum |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm(concat(norm_), ord=ord)
else:
norm_ = norm_[0]
scale = max_norm / (norm_ + 1e-6)
scale = minimum(scale, 1)
for tensor in tensors:
tensor.grad._reset(tensor.grad * scale)
| pop_scope("clip_grad_norm") | megengine.core._imperative_rt.core2.pop_scope |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm(concat(norm_), ord=ord)
else:
norm_ = norm_[0]
scale = max_norm / (norm_ + 1e-6)
scale = minimum(scale, 1)
for tensor in tensors:
tensor.grad._reset(tensor.grad * scale)
pop_scope("clip_grad_norm")
return norm_
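# Exponential moving average with bias correction, used to smooth the logged losses and timings.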
class exponential_ma:
def __init__(self, ratio):
self.value = 0
self.weight = 0
self.ratio = ratio
def update(self, x):
self.value = self.value * self.ratio + (1 - self.ratio) * x
self.weight = self.weight * self.ratio + (1 - self.ratio)
def get_value(self):
if self.weight < 1e-8:
return 0
return self.value / self.weight
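# Feed the latest per-iteration losses and timings into every moving average.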
def update_train_log(monitor_vars_name, ma_dict, losses, ttrain, tdata):
for n in monitor_vars_name:
for ma in ma_dict["losses"]:
ma[n].update(losses[n])
for ma in ma_dict["ttrain"]:
ma.update(ttrain)
for ma in ma_dict["tdata"]:
ma.update(tdata)
def print_train_log(sess, epoch, minibatch, ma_dict, minibatch_per_epoch):
ma_output = "[{}] e:{}, {}/{} ".format(
strftime("%Y-%m-%d %H:%M:%S", gmtime()), epoch, minibatch, minibatch_per_epoch
)
print(ma_output, file=sys.stderr)
line = " {:31}:".format("speed")
for ma in ma_dict["ttrain"]:
line += "{:10.2g}".format(1 / ma.get_value())
print(line, file=sys.stderr)
line = " {:31}".format("dp/tot")
for ma1, ma2 in zip(ma_dict["ttrain"], ma_dict["tdata"]):
line += "{:10.2g}".format(ma2.get_value() / ma1.get_value())
print(line, file=sys.stderr)
for k in sess.loss_names:
line = " {:31}".format(k)
for ma in ma_dict["losses"]:
line += "{:10.2E}".format(ma[k].get_value())
print(line, file=sys.stderr)
line = " {:31}: {}".format("lr", sess.get_learning_rate())
print(line, file=sys.stderr)
sys.stderr.flush()
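# Noam-style warmup: the learning rate rises linearly for the first warmup_step steps,
# then decays proportionally to step ** -0.5.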
def adjust_learning_rate(optimizer, step_num, warmup_step=4000):
lr = (
hp.lr
* warmup_step ** 0.5
* min(step_num * warmup_step ** -1.5, step_num ** -0.5)
)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def set_grad(net, min, max):
for param in net.parameters():
param.grad = | mge.random.uniform(min, max, param.shape) | megengine.random.uniform |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm(concat(norm_), ord=ord)
else:
norm_ = norm_[0]
scale = max_norm / (norm_ + 1e-6)
scale = minimum(scale, 1)
for tensor in tensors:
tensor.grad._reset(tensor.grad * scale)
pop_scope("clip_grad_norm")
return norm_
class exponential_ma:
def __init__(self, ratio):
self.value = 0
self.weight = 0
self.ratio = ratio
def update(self, x):
self.value = self.value * self.ratio + (1 - self.ratio) * x
self.weight = self.weight * self.ratio + (1 - self.ratio)
def get_value(self):
if self.weight < 1e-8:
return 0
return self.value / self.weight
def update_train_log(monitor_vars_name, ma_dict, losses, ttrain, tdata):
for n in monitor_vars_name:
for ma in ma_dict["losses"]:
ma[n].update(losses[n])
for ma in ma_dict["ttrain"]:
ma.update(ttrain)
for ma in ma_dict["tdata"]:
ma.update(tdata)
def print_train_log(sess, epoch, minibatch, ma_dict, minibatch_per_epoch):
ma_output = "[{}] e:{}, {}/{} ".format(
strftime("%Y-%m-%d %H:%M:%S", gmtime()), epoch, minibatch, minibatch_per_epoch
)
print(ma_output, file=sys.stderr)
line = " {:31}:".format("speed")
for ma in ma_dict["ttrain"]:
line += "{:10.2g}".format(1 / ma.get_value())
print(line, file=sys.stderr)
line = " {:31}".format("dp/tot")
for ma1, ma2 in zip(ma_dict["ttrain"], ma_dict["tdata"]):
line += "{:10.2g}".format(ma2.get_value() / ma1.get_value())
print(line, file=sys.stderr)
for k in sess.loss_names:
line = " {:31}".format(k)
for ma in ma_dict["losses"]:
line += "{:10.2E}".format(ma[k].get_value())
print(line, file=sys.stderr)
line = " {:31}: {}".format("lr", sess.get_learning_rate())
print(line, file=sys.stderr)
sys.stderr.flush()
def adjust_learning_rate(optimizer, step_num, warmup_step=4000):
lr = (
hp.lr
* warmup_step ** 0.5
* min(step_num * warmup_step ** -1.5, step_num ** -0.5)
)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
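# Debug helper: fill each parameter's gradient with uniform noise and keep a copy in grad_backup.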
def set_grad(net, min, max):
for param in net.parameters():
param.grad = mge.random.uniform(min, max, param.shape)
param.grad_backup = | F.copy(param.grad) | megengine.functional.copy |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm(concat(norm_), ord=ord)
else:
norm_ = norm_[0]
scale = max_norm / (norm_ + 1e-6)
scale = minimum(scale, 1)
for tensor in tensors:
tensor.grad._reset(tensor.grad * scale)
pop_scope("clip_grad_norm")
return norm_
class exponential_ma:
def __init__(self, ratio):
self.value = 0
self.weight = 0
self.ratio = ratio
def update(self, x):
self.value = self.value * self.ratio + (1 - self.ratio) * x
self.weight = self.weight * self.ratio + (1 - self.ratio)
def get_value(self):
if self.weight < 1e-8:
return 0
return self.value / self.weight
def update_train_log(monitor_vars_name, ma_dict, losses, ttrain, tdata):
for n in monitor_vars_name:
for ma in ma_dict["losses"]:
ma[n].update(losses[n])
for ma in ma_dict["ttrain"]:
ma.update(ttrain)
for ma in ma_dict["tdata"]:
ma.update(tdata)
def print_train_log(sess, epoch, minibatch, ma_dict, minibatch_per_epoch):
ma_output = "[{}] e:{}, {}/{} ".format(
strftime("%Y-%m-%d %H:%M:%S", gmtime()), epoch, minibatch, minibatch_per_epoch
)
print(ma_output, file=sys.stderr)
line = " {:31}:".format("speed")
for ma in ma_dict["ttrain"]:
line += "{:10.2g}".format(1 / ma.get_value())
print(line, file=sys.stderr)
line = " {:31}".format("dp/tot")
for ma1, ma2 in zip(ma_dict["ttrain"], ma_dict["tdata"]):
line += "{:10.2g}".format(ma2.get_value() / ma1.get_value())
print(line, file=sys.stderr)
for k in sess.loss_names:
line = " {:31}".format(k)
for ma in ma_dict["losses"]:
line += "{:10.2E}".format(ma[k].get_value())
print(line, file=sys.stderr)
line = " {:31}: {}".format("lr", sess.get_learning_rate())
print(line, file=sys.stderr)
sys.stderr.flush()
def adjust_learning_rate(optimizer, step_num, warmup_step=4000):
lr = (
hp.lr
* warmup_step ** 0.5
* min(step_num * warmup_step ** -1.5, step_num ** -0.5)
)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def set_grad(net, min, max):
for param in net.parameters():
param.grad = mge.random.uniform(min, max, param.shape)
param.grad_backup = F.copy(param.grad)
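# Session bundles the model, optimizer, autodiff GradManager and loss bookkeeping
# for a single training process.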
class Session:
def __init__(self, args):
with open(os.path.join(hp.dataset_root, "vocab.txt")) as f:
self.vocab = [w.strip() for w in f.readlines()]
self.vocab = ["<pad>"] + self.vocab
print(f"Vocab Size: {len(self.vocab)}")
self.model = Model(hp.num_mels, len(self.vocab))
world_size = args.world_size * args.ngpus
if world_size > 1:
dist.bcast_list_(self.model.parameters(), dist.WORLD)
# Autodiff gradient manager
self.gm = autodiff.GradManager().attach(
self.model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
self.global_step = 0
self.optimizer = mge.optimizer.Adam(self.model.parameters(), lr=hp.lr)
        # load pretrained model
if args.continue_path:
ckpt = mge.load(args.continue_path)
if "model" in ckpt:
state_dict = ckpt["model"]
self.model.load_state_dict(state_dict, strict=False)
self.loss_names = ["total"]
self.criterion = LabelSmoothingLoss(len(self.vocab), 0, hp.lsm_weight)
def get_learning_rate(self):
lr = self.optimizer.param_groups[0]["lr"]
return lr
def get_current_losses(self):
losses = OrderedDict()
for name in self.loss_names:
losses[name] = float(getattr(self, "loss_" + name))
return losses
def optimize_parameters(self, data):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
text_input, text_output, mel, pos_text, pos_mel, text_length, mel_length = data
with self.gm:
hs_pad, hs_mask, pred_pad, pred_mask = self.model.forward(
mel, mel_length, text_input, text_length
)
self.loss_total = self.criterion(pred_pad, text_output)
self.gm.backward(self.loss_total)
clip_grad_norm(self.model.parameters(), 1.0)
self.optimizer.step().clear_grad()
def main():
os.makedirs(hp.checkpoint_path, exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--continue_path")
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=90,
type=int,
help="number of total epochs to run (default: 90)",
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
        help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(
port=args.dist_port
) # pylint: disable=unused-variable # noqa: F841
# get device count
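    # (queried in a short-lived subprocess so the parent holds no GPU context before it forks workers)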
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
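# Per-process entry point: joins the process group when world_size > 1, builds the
# data pipeline and runs the training loop.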
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
if rank == 0:
os.makedirs(os.path.join(args.save, "asr"), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, "asr", "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
)
# build dataset
train_dataloader = build_dataset(args)
train_queue = iter(train_dataloader)
steps_per_epoch = 164905 // (world_size * hp.batch_size)
sess = Session(args)
ma_rates = [1 - 0.01 ** x for x in range(3)]
ma_dict = {
"losses": [
{k: exponential_ma(rate) for k in sess.loss_names} for rate in ma_rates
],
"ttrain": [exponential_ma(rate) for rate in ma_rates],
"tdata": [exponential_ma(rate) for rate in ma_rates],
}
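    # NOTE: despite the loop variable's name, this iterates over global training steps
    # (hp.epochs * steps_per_epoch), not epochs.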
for epoch in range(1, (hp.epochs + 1) * steps_per_epoch):
t_minibatch_start = time.time()
sess.global_step += 1
if sess.global_step < 400000:
adjust_learning_rate(sess.optimizer, sess.global_step)
tdata = time.time() - t_minibatch_start
data = next(train_queue)
sess.optimize_parameters(data)
losses = sess.get_current_losses()
ttrain = time.time() - t_minibatch_start
# print(ttrain, tdata)
update_train_log(sess.loss_names, ma_dict, losses, ttrain, tdata)
if sess.global_step % hp.log_interval == 0 and rank == 0:
print_train_log(sess, epoch, epoch, ma_dict, hp.epochs * steps_per_epoch)
if sess.global_step % hp.save_interval == 0 and rank == 0:
print("*******************************************")
mge.save(
{"model": sess.model.state_dict(), "global_step": sess.global_step},
os.path.join(
hp.checkpoint_path, "checkpoint_%d.pkl" % sess.global_step
),
)
print("*******************************************")
if sess.global_step > hp.max_steps:
exit(1)
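# Build an infinite, randomly shuffled DataLoader over the ASR training set.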
def build_dataset(args):
dataset = AsrDataset()
train_sampler = data.Infinite(
| RandomSampler(dataset=dataset, batch_size=hp.batch_size) | megengine.data.RandomSampler |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm( | concat(norm_) | megengine.functional.concat |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm(concat(norm_), ord=ord)
else:
norm_ = norm_[0]
scale = max_norm / (norm_ + 1e-6)
scale = minimum(scale, 1)
for tensor in tensors:
tensor.grad._reset(tensor.grad * scale)
pop_scope("clip_grad_norm")
return norm_
class exponential_ma:
def __init__(self, ratio):
self.value = 0
self.weight = 0
self.ratio = ratio
def update(self, x):
self.value = self.value * self.ratio + (1 - self.ratio) * x
self.weight = self.weight * self.ratio + (1 - self.ratio)
def get_value(self):
if self.weight < 1e-8:
return 0
return self.value / self.weight
def update_train_log(monitor_vars_name, ma_dict, losses, ttrain, tdata):
for n in monitor_vars_name:
for ma in ma_dict["losses"]:
ma[n].update(losses[n])
for ma in ma_dict["ttrain"]:
ma.update(ttrain)
for ma in ma_dict["tdata"]:
ma.update(tdata)
def print_train_log(sess, epoch, minibatch, ma_dict, minibatch_per_epoch):
ma_output = "[{}] e:{}, {}/{} ".format(
strftime("%Y-%m-%d %H:%M:%S", gmtime()), epoch, minibatch, minibatch_per_epoch
)
print(ma_output, file=sys.stderr)
line = " {:31}:".format("speed")
for ma in ma_dict["ttrain"]:
line += "{:10.2g}".format(1 / ma.get_value())
print(line, file=sys.stderr)
line = " {:31}".format("dp/tot")
for ma1, ma2 in zip(ma_dict["ttrain"], ma_dict["tdata"]):
line += "{:10.2g}".format(ma2.get_value() / ma1.get_value())
print(line, file=sys.stderr)
for k in sess.loss_names:
line = " {:31}".format(k)
for ma in ma_dict["losses"]:
line += "{:10.2E}".format(ma[k].get_value())
print(line, file=sys.stderr)
line = " {:31}: {}".format("lr", sess.get_learning_rate())
print(line, file=sys.stderr)
sys.stderr.flush()
def adjust_learning_rate(optimizer, step_num, warmup_step=4000):
lr = (
hp.lr
* warmup_step ** 0.5
* min(step_num * warmup_step ** -1.5, step_num ** -0.5)
)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def set_grad(net, min, max):
for param in net.parameters():
param.grad = mge.random.uniform(min, max, param.shape)
param.grad_backup = F.copy(param.grad)
class Session:
def __init__(self, args):
with open(os.path.join(hp.dataset_root, "vocab.txt")) as f:
self.vocab = [w.strip() for w in f.readlines()]
self.vocab = ["<pad>"] + self.vocab
print(f"Vocab Size: {len(self.vocab)}")
self.model = Model(hp.num_mels, len(self.vocab))
world_size = args.world_size * args.ngpus
if world_size > 1:
dist.bcast_list_(self.model.parameters(), dist.WORLD)
# Autodiff gradient manager
self.gm = autodiff.GradManager().attach(
self.model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
self.global_step = 0
self.optimizer = mge.optimizer.Adam(self.model.parameters(), lr=hp.lr)
        # load pretrained model
if args.continue_path:
ckpt = | mge.load(args.continue_path) | megengine.load |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm(concat(norm_), ord=ord)
else:
norm_ = norm_[0]
scale = max_norm / (norm_ + 1e-6)
scale = minimum(scale, 1)
for tensor in tensors:
tensor.grad._reset(tensor.grad * scale)
pop_scope("clip_grad_norm")
return norm_
class exponential_ma:
def __init__(self, ratio):
self.value = 0
self.weight = 0
self.ratio = ratio
def update(self, x):
self.value = self.value * self.ratio + (1 - self.ratio) * x
self.weight = self.weight * self.ratio + (1 - self.ratio)
def get_value(self):
if self.weight < 1e-8:
return 0
return self.value / self.weight
def update_train_log(monitor_vars_name, ma_dict, losses, ttrain, tdata):
for n in monitor_vars_name:
for ma in ma_dict["losses"]:
ma[n].update(losses[n])
for ma in ma_dict["ttrain"]:
ma.update(ttrain)
for ma in ma_dict["tdata"]:
ma.update(tdata)
def print_train_log(sess, epoch, minibatch, ma_dict, minibatch_per_epoch):
ma_output = "[{}] e:{}, {}/{} ".format(
strftime("%Y-%m-%d %H:%M:%S", gmtime()), epoch, minibatch, minibatch_per_epoch
)
print(ma_output, file=sys.stderr)
line = " {:31}:".format("speed")
for ma in ma_dict["ttrain"]:
line += "{:10.2g}".format(1 / ma.get_value())
print(line, file=sys.stderr)
line = " {:31}".format("dp/tot")
for ma1, ma2 in zip(ma_dict["ttrain"], ma_dict["tdata"]):
line += "{:10.2g}".format(ma2.get_value() / ma1.get_value())
print(line, file=sys.stderr)
for k in sess.loss_names:
line = " {:31}".format(k)
for ma in ma_dict["losses"]:
line += "{:10.2E}".format(ma[k].get_value())
print(line, file=sys.stderr)
line = " {:31}: {}".format("lr", sess.get_learning_rate())
print(line, file=sys.stderr)
sys.stderr.flush()
def adjust_learning_rate(optimizer, step_num, warmup_step=4000):
lr = (
hp.lr
* warmup_step ** 0.5
* min(step_num * warmup_step ** -1.5, step_num ** -0.5)
)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def set_grad(net, min, max):
for param in net.parameters():
param.grad = mge.random.uniform(min, max, param.shape)
param.grad_backup = F.copy(param.grad)
class Session:
def __init__(self, args):
with open(os.path.join(hp.dataset_root, "vocab.txt")) as f:
self.vocab = [w.strip() for w in f.readlines()]
self.vocab = ["<pad>"] + self.vocab
print(f"Vocab Size: {len(self.vocab)}")
self.model = Model(hp.num_mels, len(self.vocab))
world_size = args.world_size * args.ngpus
if world_size > 1:
dist.bcast_list_(self.model.parameters(), dist.WORLD)
# Autodiff gradient manager
self.gm = autodiff.GradManager().attach(
self.model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
self.global_step = 0
self.optimizer = mge.optimizer.Adam(self.model.parameters(), lr=hp.lr)
        # load pretrained model
if args.continue_path:
ckpt = mge.load(args.continue_path)
if "model" in ckpt:
state_dict = ckpt["model"]
self.model.load_state_dict(state_dict, strict=False)
self.loss_names = ["total"]
self.criterion = LabelSmoothingLoss(len(self.vocab), 0, hp.lsm_weight)
def get_learning_rate(self):
lr = self.optimizer.param_groups[0]["lr"]
return lr
def get_current_losses(self):
losses = OrderedDict()
for name in self.loss_names:
losses[name] = float(getattr(self, "loss_" + name))
return losses
def optimize_parameters(self, data):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
text_input, text_output, mel, pos_text, pos_mel, text_length, mel_length = data
with self.gm:
hs_pad, hs_mask, pred_pad, pred_mask = self.model.forward(
mel, mel_length, text_input, text_length
)
self.loss_total = self.criterion(pred_pad, text_output)
self.gm.backward(self.loss_total)
clip_grad_norm(self.model.parameters(), 1.0)
self.optimizer.step().clear_grad()
def main():
os.makedirs(hp.checkpoint_path, exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--continue_path")
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=90,
type=int,
help="number of total epochs to run (default: 90)",
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
        help="print frequency (default: 20)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(
port=args.dist_port
) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
if rank == 0:
os.makedirs(os.path.join(args.save, "asr"), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, "asr", "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", | dist.get_rank() | megengine.distributed.get_rank |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm(concat(norm_), ord=ord)
else:
norm_ = norm_[0]
scale = max_norm / (norm_ + 1e-6)
scale = minimum(scale, 1)
for tensor in tensors:
tensor.grad._reset(tensor.grad * scale)
pop_scope("clip_grad_norm")
return norm_
class exponential_ma:
def __init__(self, ratio):
self.value = 0
self.weight = 0
self.ratio = ratio
def update(self, x):
self.value = self.value * self.ratio + (1 - self.ratio) * x
self.weight = self.weight * self.ratio + (1 - self.ratio)
def get_value(self):
if self.weight < 1e-8:
return 0
return self.value / self.weight
def update_train_log(monitor_vars_name, ma_dict, losses, ttrain, tdata):
for n in monitor_vars_name:
for ma in ma_dict["losses"]:
ma[n].update(losses[n])
for ma in ma_dict["ttrain"]:
ma.update(ttrain)
for ma in ma_dict["tdata"]:
ma.update(tdata)
def print_train_log(sess, epoch, minibatch, ma_dict, minibatch_per_epoch):
ma_output = "[{}] e:{}, {}/{} ".format(
strftime("%Y-%m-%d %H:%M:%S", gmtime()), epoch, minibatch, minibatch_per_epoch
)
print(ma_output, file=sys.stderr)
line = " {:31}:".format("speed")
for ma in ma_dict["ttrain"]:
line += "{:10.2g}".format(1 / ma.get_value())
print(line, file=sys.stderr)
line = " {:31}".format("dp/tot")
for ma1, ma2 in zip(ma_dict["ttrain"], ma_dict["tdata"]):
line += "{:10.2g}".format(ma2.get_value() / ma1.get_value())
print(line, file=sys.stderr)
for k in sess.loss_names:
line = " {:31}".format(k)
for ma in ma_dict["losses"]:
line += "{:10.2E}".format(ma[k].get_value())
print(line, file=sys.stderr)
line = " {:31}: {}".format("lr", sess.get_learning_rate())
print(line, file=sys.stderr)
sys.stderr.flush()
def adjust_learning_rate(optimizer, step_num, warmup_step=4000):
lr = (
hp.lr
* warmup_step ** 0.5
* min(step_num * warmup_step ** -1.5, step_num ** -0.5)
)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def set_grad(net, min, max):
for param in net.parameters():
param.grad = mge.random.uniform(min, max, param.shape)
param.grad_backup = F.copy(param.grad)
class Session:
def __init__(self, args):
with open(os.path.join(hp.dataset_root, "vocab.txt")) as f:
self.vocab = [w.strip() for w in f.readlines()]
self.vocab = ["<pad>"] + self.vocab
print(f"Vocab Size: {len(self.vocab)}")
self.model = Model(hp.num_mels, len(self.vocab))
world_size = args.world_size * args.ngpus
if world_size > 1:
dist.bcast_list_(self.model.parameters(), dist.WORLD)
# Autodiff gradient manager
self.gm = autodiff.GradManager().attach(
self.model.parameters(),
callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
)
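        # Data-parallel setup: parameters are broadcast once so every worker
        # starts from identical weights, and the GradManager callback
        # all-reduces (sums) gradients across workers during backward().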
self.global_step = 0
self.optimizer = mge.optimizer.Adam(self.model.parameters(), lr=hp.lr)
# load pretrain model
if args.continue_path:
ckpt = mge.load(args.continue_path)
if "model" in ckpt:
state_dict = ckpt["model"]
self.model.load_state_dict(state_dict, strict=False)
self.loss_names = ["total"]
self.criterion = LabelSmoothingLoss(len(self.vocab), 0, hp.lsm_weight)
def get_learning_rate(self):
lr = self.optimizer.param_groups[0]["lr"]
return lr
def get_current_losses(self):
losses = OrderedDict()
for name in self.loss_names:
losses[name] = float(getattr(self, "loss_" + name))
return losses
def optimize_parameters(self, data):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
text_input, text_output, mel, pos_text, pos_mel, text_length, mel_length = data
with self.gm:
hs_pad, hs_mask, pred_pad, pred_mask = self.model.forward(
mel, mel_length, text_input, text_length
)
self.loss_total = self.criterion(pred_pad, text_output)
self.gm.backward(self.loss_total)
clip_grad_norm(self.model.parameters(), 1.0)
self.optimizer.step().clear_grad()
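        # Step order: forward and loss are recorded inside the GradManager
        # context, backward() fills .grad on the attached parameters, the
        # gradients are clipped to a global norm of 1.0, then step() applies
        # the update and clear_grad() resets gradients for the next minibatch.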
def main():
os.makedirs(hp.checkpoint_path, exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--continue_path")
parser.add_argument(
"-n",
"--ngpus",
default=None,
type=int,
help="number of GPUs per node (default: None, use all available GPUs)",
)
parser.add_argument(
"--save",
metavar="DIR",
default="output",
help="path to save checkpoint and log",
)
parser.add_argument(
"--epochs",
default=90,
type=int,
help="number of total epochs to run (default: 90)",
)
parser.add_argument("-j", "--workers", default=2, type=int)
parser.add_argument(
"-p",
"--print-freq",
default=20,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument("--dist-addr", default="localhost")
parser.add_argument("--dist-port", default=23456, type=int)
parser.add_argument("--world-size", default=1, type=int)
parser.add_argument("--rank", default=0, type=int)
args = parser.parse_args()
# create server if is master
if args.rank <= 0:
server = dist.Server(
port=args.dist_port
) # pylint: disable=unused-variable # noqa: F841
# get device count
with multiprocessing.Pool(1) as pool:
ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
if args.ngpus:
ngpus_per_node = args.ngpus
# launch processes
procs = []
for local_rank in range(ngpus_per_node):
p = multiprocessing.Process(
target=worker,
kwargs=dict(
rank=args.rank * ngpus_per_node + local_rank,
world_size=args.world_size * ngpus_per_node,
ngpus_per_node=ngpus_per_node,
args=args,
),
)
p.start()
procs.append(p)
# join processes
for p in procs:
p.join()
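# Launch pattern: main() spawns one worker() process per local GPU, giving
# each a globally unique rank (args.rank * ngpus_per_node + local_rank); the
# master rank additionally starts a dist.Server, and the device count is
# probed in a short-lived subprocess, presumably so the parent process never
# touches the GPU before forking.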
def worker(rank, world_size, ngpus_per_node, args):
# pylint: disable=too-many-statements
if rank == 0:
os.makedirs(os.path.join(args.save, "asr"), exist_ok=True)
megengine.logger.set_log_file(os.path.join(args.save, "asr", "log.txt"))
# init process group
if world_size > 1:
dist.init_process_group(
master_ip=args.dist_addr,
port=args.dist_port,
world_size=world_size,
rank=rank,
device=rank % ngpus_per_node,
backend="nccl",
)
logging.info(
"init process group rank %d / %d", dist.get_rank(), | dist.get_world_size() | megengine.distributed.get_world_size |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm(concat(norm_), ord=ord)
else:
norm_ = norm_[0]
scale = max_norm / (norm_ + 1e-6)
scale = minimum(scale, 1)
for tensor in tensors:
tensor.grad._reset(tensor.grad * scale)
pop_scope("clip_grad_norm")
return norm_
class exponential_ma:
def __init__(self, ratio):
self.value = 0
self.weight = 0
self.ratio = ratio
def update(self, x):
self.value = self.value * self.ratio + (1 - self.ratio) * x
self.weight = self.weight * self.ratio + (1 - self.ratio)
def get_value(self):
if self.weight < 1e-8:
return 0
return self.value / self.weight
def update_train_log(monitor_vars_name, ma_dict, losses, ttrain, tdata):
for n in monitor_vars_name:
for ma in ma_dict["losses"]:
ma[n].update(losses[n])
for ma in ma_dict["ttrain"]:
ma.update(ttrain)
for ma in ma_dict["tdata"]:
ma.update(tdata)
def print_train_log(sess, epoch, minibatch, ma_dict, minibatch_per_epoch):
ma_output = "[{}] e:{}, {}/{} ".format(
strftime("%Y-%m-%d %H:%M:%S", gmtime()), epoch, minibatch, minibatch_per_epoch
)
print(ma_output, file=sys.stderr)
line = " {:31}:".format("speed")
for ma in ma_dict["ttrain"]:
line += "{:10.2g}".format(1 / ma.get_value())
print(line, file=sys.stderr)
line = " {:31}".format("dp/tot")
for ma1, ma2 in zip(ma_dict["ttrain"], ma_dict["tdata"]):
line += "{:10.2g}".format(ma2.get_value() / ma1.get_value())
print(line, file=sys.stderr)
for k in sess.loss_names:
line = " {:31}".format(k)
for ma in ma_dict["losses"]:
line += "{:10.2E}".format(ma[k].get_value())
print(line, file=sys.stderr)
line = " {:31}: {}".format("lr", sess.get_learning_rate())
print(line, file=sys.stderr)
sys.stderr.flush()
def adjust_learning_rate(optimizer, step_num, warmup_step=4000):
lr = (
hp.lr
* warmup_step ** 0.5
* min(step_num * warmup_step ** -1.5, step_num ** -0.5)
)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def set_grad(net, min, max):
for param in net.parameters():
param.grad = mge.random.uniform(min, max, param.shape)
param.grad_backup = F.copy(param.grad)
class Session:
def __init__(self, args):
with open(os.path.join(hp.dataset_root, "vocab.txt")) as f:
self.vocab = [w.strip() for w in f.readlines()]
self.vocab = ["<pad>"] + self.vocab
print(f"Vocab Size: {len(self.vocab)}")
self.model = Model(hp.num_mels, len(self.vocab))
world_size = args.world_size * args.ngpus
if world_size > 1:
dist.bcast_list_(self.model.parameters(), dist.WORLD)
# Autodiff gradient manager
self.gm = | autodiff.GradManager() | megengine.autodiff.GradManager |
import os
import sys
import time
from collections import OrderedDict
from time import strftime, gmtime
from tensorboardX import SummaryWriter
from dataset import AsrDataset, DataLoader, AsrCollator
from models.transformer import Model
import hparams as hp
import argparse
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.functional import clip, concat, minimum, norm
from megengine.core._imperative_rt.core2 import pop_scope, push_scope
from typing import Iterable, Union
from megengine.tensor import Tensor
import megengine.distributed as dist
from megengine.data import SequentialSampler, RandomSampler, DataLoader
from criterions.label_smoothing_loss import LabelSmoothingLoss
from megengine.utils.network import Network as Net
import megengine.autodiff as autodiff
import megengine.data as data
import megengine
import multiprocessing
logging = megengine.logger.get_logger()
def clip_grad_norm(
tensors: Union[Tensor, Iterable[Tensor]],
max_norm: float,
ord: float = 2.0,
):
push_scope("clip_grad_norm")
if isinstance(tensors, Tensor):
tensors = [tensors]
tensors = [t for t in tensors if t.grad is not None]
norm_ = [norm(t.grad.flatten(), ord=ord) for t in tensors]
if len(norm_) > 1:
norm_ = norm(concat(norm_), ord=ord)
else:
norm_ = norm_[0]
scale = max_norm / (norm_ + 1e-6)
scale = minimum(scale, 1)
for tensor in tensors:
tensor.grad._reset(tensor.grad * scale)
pop_scope("clip_grad_norm")
return norm_
class exponential_ma:
def __init__(self, ratio):
self.value = 0
self.weight = 0
self.ratio = ratio
def update(self, x):
self.value = self.value * self.ratio + (1 - self.ratio) * x
self.weight = self.weight * self.ratio + (1 - self.ratio)
def get_value(self):
if self.weight < 1e-8:
return 0
return self.value / self.weight
def update_train_log(monitor_vars_name, ma_dict, losses, ttrain, tdata):
for n in monitor_vars_name:
for ma in ma_dict["losses"]:
ma[n].update(losses[n])
for ma in ma_dict["ttrain"]:
ma.update(ttrain)
for ma in ma_dict["tdata"]:
ma.update(tdata)
def print_train_log(sess, epoch, minibatch, ma_dict, minibatch_per_epoch):
ma_output = "[{}] e:{}, {}/{} ".format(
strftime("%Y-%m-%d %H:%M:%S", gmtime()), epoch, minibatch, minibatch_per_epoch
)
print(ma_output, file=sys.stderr)
line = " {:31}:".format("speed")
for ma in ma_dict["ttrain"]:
line += "{:10.2g}".format(1 / ma.get_value())
print(line, file=sys.stderr)
line = " {:31}".format("dp/tot")
for ma1, ma2 in zip(ma_dict["ttrain"], ma_dict["tdata"]):
line += "{:10.2g}".format(ma2.get_value() / ma1.get_value())
print(line, file=sys.stderr)
for k in sess.loss_names:
line = " {:31}".format(k)
for ma in ma_dict["losses"]:
line += "{:10.2E}".format(ma[k].get_value())
print(line, file=sys.stderr)
line = " {:31}: {}".format("lr", sess.get_learning_rate())
print(line, file=sys.stderr)
sys.stderr.flush()
def adjust_learning_rate(optimizer, step_num, warmup_step=4000):
lr = (
hp.lr
* warmup_step ** 0.5
* min(step_num * warmup_step ** -1.5, step_num ** -0.5)
)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def set_grad(net, min, max):
for param in net.parameters():
param.grad = mge.random.uniform(min, max, param.shape)
param.grad_backup = F.copy(param.grad)
class Session:
def __init__(self, args):
with open(os.path.join(hp.dataset_root, "vocab.txt")) as f:
self.vocab = [w.strip() for w in f.readlines()]
self.vocab = ["<pad>"] + self.vocab
print(f"Vocab Size: {len(self.vocab)}")
self.model = Model(hp.num_mels, len(self.vocab))
world_size = args.world_size * args.ngpus
if world_size > 1:
dist.bcast_list_(self.model.parameters(), dist.WORLD)
# Autodiff gradient manager
self.gm = autodiff.GradManager().attach(
self.model.parameters(),
callbacks= | dist.make_allreduce_cb("SUM") | megengine.distributed.make_allreduce_cb |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.jit as jit
from .. import gan
from ..blocks import DBlock, DBlockOptimized
class WGANBaseGenerator(gan.BaseGenerator):
r"""
ResNet backbone generator for ResNet WGAN.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, **kwargs):
super().__init__(nz=nz,
ngf=ngf,
bottom_width=bottom_width,
loss_type="wasserstein",
**kwargs)
class WGANBaseDiscriminator(gan.BaseDiscriminator):
r"""
ResNet backbone discriminator for ResNet WGAN.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, **kwargs):
super().__init__(ndf=ndf, loss_type="wasserstein", **kwargs)
def _reset_jit_graph(self, impl: callable):
"""We override this func to attach weight clipping after default training step"""
traced_obj = jit.trace(impl)
def _(*args, **kwargs):
ret = traced_obj(*args, **kwargs)
if self.training:
self._apply_lipshitz_constraint() # dynamically apply weight clipping
return ret
return _
def _apply_lipshitz_constraint(self):
"""Weight clipping described in [Wasserstein GAN](https://arxiv.org/abs/1701.07875)"""
for p in self.parameters():
F.add_update(p, F.clamp(p, lower=-3e-2, upper=3e-2), alpha=0)
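        # With alpha=0, add_update overwrites each parameter with its clamped
        # value, i.e. p <- clamp(p, -0.03, 0.03) after every training step;
        # this is the crude weight clipping WGAN uses to keep the critic
        # roughly Lipschitz-bounded.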
def layernorm(x):
original_shape = x.shape
x = x.reshape(original_shape[0], -1)
m = | F.mean(x, axis=1, keepdims=True) | megengine.functional.mean |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.jit as jit
from .. import gan
from ..blocks import DBlock, DBlockOptimized
class WGANBaseGenerator(gan.BaseGenerator):
r"""
ResNet backbone generator for ResNet WGAN.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, **kwargs):
super().__init__(nz=nz,
ngf=ngf,
bottom_width=bottom_width,
loss_type="wasserstein",
**kwargs)
class WGANBaseDiscriminator(gan.BaseDiscriminator):
r"""
ResNet backbone discriminator for ResNet WGAN.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, **kwargs):
super().__init__(ndf=ndf, loss_type="wasserstein", **kwargs)
def _reset_jit_graph(self, impl: callable):
"""We override this func to attach weight clipping after default training step"""
traced_obj = jit.trace(impl)
def _(*args, **kwargs):
ret = traced_obj(*args, **kwargs)
if self.training:
self._apply_lipshitz_constraint() # dynamically apply weight clipping
return ret
return _
def _apply_lipshitz_constraint(self):
"""Weight clipping described in [Wasserstein GAN](https://arxiv.org/abs/1701.07875)"""
for p in self.parameters():
F.add_update(p, F.clamp(p, lower=-3e-2, upper=3e-2), alpha=0)
def layernorm(x):
original_shape = x.shape
x = x.reshape(original_shape[0], -1)
m = F.mean(x, axis=1, keepdims=True)
v = | F.mean((x - m) ** 2, axis=1, keepdims=True) | megengine.functional.mean |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.jit as jit
from .. import gan
from ..blocks import DBlock, DBlockOptimized
class WGANBaseGenerator(gan.BaseGenerator):
r"""
ResNet backbone generator for ResNet WGAN.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, **kwargs):
super().__init__(nz=nz,
ngf=ngf,
bottom_width=bottom_width,
loss_type="wasserstein",
**kwargs)
class WGANBaseDiscriminator(gan.BaseDiscriminator):
r"""
ResNet backbone discriminator for ResNet WGAN.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, **kwargs):
super().__init__(ndf=ndf, loss_type="wasserstein", **kwargs)
def _reset_jit_graph(self, impl: callable):
"""We override this func to attach weight clipping after default training step"""
traced_obj = | jit.trace(impl) | megengine.jit.trace |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.jit as jit
from .. import gan
from ..blocks import DBlock, DBlockOptimized
class WGANBaseGenerator(gan.BaseGenerator):
r"""
ResNet backbone generator for ResNet WGAN.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, **kwargs):
super().__init__(nz=nz,
ngf=ngf,
bottom_width=bottom_width,
loss_type="wasserstein",
**kwargs)
class WGANBaseDiscriminator(gan.BaseDiscriminator):
r"""
ResNet backbone discriminator for ResNet WGAN.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, **kwargs):
super().__init__(ndf=ndf, loss_type="wasserstein", **kwargs)
def _reset_jit_graph(self, impl: callable):
"""We override this func to attach weight clipping after default training step"""
traced_obj = jit.trace(impl)
def _(*args, **kwargs):
ret = traced_obj(*args, **kwargs)
if self.training:
self._apply_lipshitz_constraint() # dynamically apply weight clipping
return ret
return _
def _apply_lipshitz_constraint(self):
"""Weight clipping described in [Wasserstein GAN](https://arxiv.org/abs/1701.07875)"""
for p in self.parameters():
F.add_update(p, F.clamp(p, lower=-3e-2, upper=3e-2), alpha=0)
def layernorm(x):
original_shape = x.shape
x = x.reshape(original_shape[0], -1)
m = F.mean(x, axis=1, keepdims=True)
v = F.mean((x - m) ** 2, axis=1, keepdims=True)
x = (x - m) / F.maximum( | F.sqrt(v) | megengine.functional.sqrt |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.jit as jit
from .. import gan
from ..blocks import DBlock, DBlockOptimized
class WGANBaseGenerator(gan.BaseGenerator):
r"""
ResNet backbone generator for ResNet WGAN.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, **kwargs):
super().__init__(nz=nz,
ngf=ngf,
bottom_width=bottom_width,
loss_type="wasserstein",
**kwargs)
class WGANBaseDiscriminator(gan.BaseDiscriminator):
r"""
ResNet backbone discriminator for ResNet WGAN.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, **kwargs):
super().__init__(ndf=ndf, loss_type="wasserstein", **kwargs)
def _reset_jit_graph(self, impl: callable):
"""We override this func to attach weight clipping after default training step"""
traced_obj = jit.trace(impl)
def _(*args, **kwargs):
ret = traced_obj(*args, **kwargs)
if self.training:
self._apply_lipshitz_constraint() # dynamically apply weight clipping
return ret
return _
def _apply_lipshitz_constraint(self):
"""Weight clipping described in [Wasserstein GAN](https://arxiv.org/abs/1701.07875)"""
for p in self.parameters():
F.add_update(p, F.clamp(p, lower=-3e-2, upper=3e-2), alpha=0)
def layernorm(x):
original_shape = x.shape
x = x.reshape(original_shape[0], -1)
m = F.mean(x, axis=1, keepdims=True)
v = F.mean((x - m) ** 2, axis=1, keepdims=True)
x = (x - m) / F.maximum(F.sqrt(v), 1e-6)
x = x.reshape(original_shape)
return x
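# layernorm above is a parameter-free per-sample normalization: each sample is
# flattened, standardized to zero mean and unit variance over all of its
# features, then reshaped back. Unlike a learned LayerNorm it has no scale or
# shift; the discriminator blocks below use it in place of BatchNorm, which
# WGAN-style critics usually avoid.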
class WGANDBlockWithLayerNorm(DBlock):
def _residual(self, x):
h = x
h = layernorm(h)
h = self.activation(h)
h = self.c1(h)
h = layernorm(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = | F.avg_pool2d(h, 2) | megengine.functional.avg_pool2d |
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.jit as jit
from .. import gan
from ..blocks import DBlock, DBlockOptimized
class WGANBaseGenerator(gan.BaseGenerator):
r"""
ResNet backbone generator for ResNet WGAN.
Attributes:
nz (int): Noise dimension for upsampling.
ngf (int): Variable controlling generator feature map sizes.
bottom_width (int): Starting width for upsampling generator output to an image.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, nz, ngf, bottom_width, **kwargs):
super().__init__(nz=nz,
ngf=ngf,
bottom_width=bottom_width,
loss_type="wasserstein",
**kwargs)
class WGANBaseDiscriminator(gan.BaseDiscriminator):
r"""
ResNet backbone discriminator for ResNet WGAN.
Attributes:
ndf (int): Variable controlling discriminator feature map sizes.
loss_type (str): Name of loss to use for GAN loss.
"""
def __init__(self, ndf, **kwargs):
super().__init__(ndf=ndf, loss_type="wasserstein", **kwargs)
def _reset_jit_graph(self, impl: callable):
"""We override this func to attach weight clipping after default training step"""
traced_obj = jit.trace(impl)
def _(*args, **kwargs):
ret = traced_obj(*args, **kwargs)
if self.training:
self._apply_lipshitz_constraint() # dynamically apply weight clipping
return ret
return _
def _apply_lipshitz_constraint(self):
"""Weight clipping described in [Wasserstein GAN](https://arxiv.org/abs/1701.07875)"""
for p in self.parameters():
F.add_update(p, | F.clamp(p, lower=-3e-2, upper=3e-2) | megengine.functional.clamp |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return | M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1) | megengine.module.ConvTranspose2d |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return | M.Conv2d(ic, oc, 3, padding=1, stride=2) | megengine.module.Conv2d |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
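# BasicBlock above is a residual block with a squeeze-and-excitation style
# gate: a global average pool feeds fc1 -> LeakyReLU -> fc2 -> sigmoid to
# produce per-channel weights w, and the output is x * w + identity before
# the final activation.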
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = | F.dimshuffle(x, (0, 1, 4, 2, 5, 3)) | megengine.functional.dimshuffle |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, | M.LeakyReLU(0.1) | megengine.module.LeakyReLU |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, | M.Sigmoid() | megengine.module.Sigmoid |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = | M.Conv2d(channels, channels, 3, 1, padding=1, bias=True) | megengine.module.Conv2d |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = | M.Conv2d(channels, 16, kernel_size=1) | megengine.module.Conv2d |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = | M.Conv2d(16, channels, kernel_size=1) | megengine.module.Conv2d |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = | M.LeakyReLU(0.1) | megengine.module.LeakyReLU |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = | M.LeakyReLU(0.1) | megengine.module.LeakyReLU |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = | M.LeakyReLU(0.1) | megengine.module.LeakyReLU |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = | M.Identity() | megengine.module.Identity |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = F.dimshuffle(x, (0, 1, 4, 2, 5, 3))
return x.reshape(shape[0], shape[1] // 4, shape[2]*2, shape[3]*2)
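# subpixel is a depth-to-space (pixel shuffle) step with upscale factor 2:
# every group of 4 channels is rearranged into a 2x2 spatial block, turning
# an (N, C, H, W) tensor into (N, C // 4, 2 * H, 2 * W).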
c = 64
class SimpleUNet(M.Module):
def __init__(self):
super().__init__()
self.conv0_ = (BasicBlock(3, 32, stride=2))
self.conv1_ = (BasicBlock(32, c, stride=2))
self.conv0 = (BasicBlock(15, 32, stride=2))
self.conv1 = (BasicBlock(32, c, stride=2))
self.conv2 = (BasicBlock(c, 2*c, stride=1))
self.conv3 = (BasicBlock(2*c, 2*c, stride=1))
self.conv4 = (BasicBlock(4*c, 2*c, stride=1))
self.conv5 = (BasicBlock(4*c, 2*c, stride=1))
self.conv6 = (BasicBlock(6*c, 2*c, stride=1))
self.conv7 = (BasicBlock(6*c, 2*c, stride=1))
self.conv8 = (BasicBlock(6*c, 2*c, stride=1))
self.conv9 = (BasicBlock(6*c, 2*c, stride=1))
self.conv10 = (BasicBlock(3*c, 4*c, stride=1))
self.conv11 = addSig(M.Conv2d(c+32, 12, 1))
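    # Rough data flow (see forward below; the tail of forward is not shown
    # here, so this is an inferred summary): the 5-frame clip is encoded per
    # frame by conv0_/conv1_ with tsm (imported from utils, presumably a
    # temporal-shift op), the centre frame's features are added to a joint
    # 15-channel encoding from conv0/conv1, and the result runs through
    # BasicBlocks conv2..conv10 whose inputs concatenate earlier feature maps
    # before conv11 predicts 12 sigmoid channels.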
def forward(self, x):
size = x.shape
x = x.reshape((size[0] * 5, 3) + size[2:])
conv0 = tsm(self.conv0_(x))
conv1 = tsm(self.conv1_(conv0))
#
x = (x.reshape((size[0], 15) + x.shape[2:]))
conv0_ = (conv0.reshape((size[0], 5) + conv0.shape[1:]))[:, 2]
conv1_ = (conv1.reshape((size[0], 5) + conv1.shape[1:]))[:, 2]
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv0 += conv0_
conv1 += conv1_
conv2 = (self.conv2(conv1))
conv3 = (self.conv3(conv2))
conv4 = (self.conv4( | F.concat((conv3, conv2), 1) | megengine.functional.concat |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = F.dimshuffle(x, (0, 1, 4, 2, 5, 3))
return x.reshape(shape[0], shape[1] // 4, shape[2]*2, shape[3]*2)
c = 64
class SimpleUNet(M.Module):
def __init__(self):
super().__init__()
self.conv0_ = (BasicBlock(3, 32, stride=2))
self.conv1_ = (BasicBlock(32, c, stride=2))
self.conv0 = (BasicBlock(15, 32, stride=2))
self.conv1 = (BasicBlock(32, c, stride=2))
self.conv2 = (BasicBlock(c, 2*c, stride=1))
self.conv3 = (BasicBlock(2*c, 2*c, stride=1))
self.conv4 = (BasicBlock(4*c, 2*c, stride=1))
self.conv5 = (BasicBlock(4*c, 2*c, stride=1))
self.conv6 = (BasicBlock(6*c, 2*c, stride=1))
self.conv7 = (BasicBlock(6*c, 2*c, stride=1))
self.conv8 = (BasicBlock(6*c, 2*c, stride=1))
self.conv9 = (BasicBlock(6*c, 2*c, stride=1))
self.conv10 = (BasicBlock(3*c, 4*c, stride=1))
self.conv11 = addSig(M.Conv2d(c+32, 12, 1))
def forward(self, x):
size = x.shape
x = x.reshape((size[0] * 5, 3) + size[2:])
conv0 = tsm(self.conv0_(x))
conv1 = tsm(self.conv1_(conv0))
#
x = (x.reshape((size[0], 15) + x.shape[2:]))
conv0_ = (conv0.reshape((size[0], 5) + conv0.shape[1:]))[:, 2]
conv1_ = (conv1.reshape((size[0], 5) + conv1.shape[1:]))[:, 2]
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv0 += conv0_
conv1 += conv1_
conv2 = (self.conv2(conv1))
conv3 = (self.conv3(conv2))
conv4 = (self.conv4(F.concat((conv3, conv2), 1)))
conv5 = (self.conv5( | F.concat((conv4, conv3), 1) | megengine.functional.concat |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = F.dimshuffle(x, (0, 1, 4, 2, 5, 3))
return x.reshape(shape[0], shape[1] // 4, shape[2]*2, shape[3]*2)
c = 64
class SimpleUNet(M.Module):
def __init__(self):
super().__init__()
self.conv0_ = (BasicBlock(3, 32, stride=2))
self.conv1_ = (BasicBlock(32, c, stride=2))
self.conv0 = (BasicBlock(15, 32, stride=2))
self.conv1 = (BasicBlock(32, c, stride=2))
self.conv2 = (BasicBlock(c, 2*c, stride=1))
self.conv3 = (BasicBlock(2*c, 2*c, stride=1))
self.conv4 = (BasicBlock(4*c, 2*c, stride=1))
self.conv5 = (BasicBlock(4*c, 2*c, stride=1))
self.conv6 = (BasicBlock(6*c, 2*c, stride=1))
self.conv7 = (BasicBlock(6*c, 2*c, stride=1))
self.conv8 = (BasicBlock(6*c, 2*c, stride=1))
self.conv9 = (BasicBlock(6*c, 2*c, stride=1))
self.conv10 = (BasicBlock(3*c, 4*c, stride=1))
self.conv11 = addSig(M.Conv2d(c+32, 12, 1))
def forward(self, x):
size = x.shape
x = x.reshape((size[0] * 5, 3) + size[2:])
conv0 = tsm(self.conv0_(x))
conv1 = tsm(self.conv1_(conv0))
#
x = (x.reshape((size[0], 15) + x.shape[2:]))
conv0_ = (conv0.reshape((size[0], 5) + conv0.shape[1:]))[:, 2]
conv1_ = (conv1.reshape((size[0], 5) + conv1.shape[1:]))[:, 2]
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv0 += conv0_
conv1 += conv1_
conv2 = (self.conv2(conv1))
conv3 = (self.conv3(conv2))
conv4 = (self.conv4(F.concat((conv3, conv2), 1)))
conv5 = (self.conv5(F.concat((conv4, conv3), 1)))
conv6 = (self.conv6( | F.concat((conv5, conv4, conv2), 1) | megengine.functional.concat |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = F.dimshuffle(x, (0, 1, 4, 2, 5, 3))
return x.reshape(shape[0], shape[1] // 4, shape[2]*2, shape[3]*2)
c = 64
class SimpleUNet(M.Module):
def __init__(self):
super().__init__()
self.conv0_ = (BasicBlock(3, 32, stride=2))
self.conv1_ = (BasicBlock(32, c, stride=2))
self.conv0 = (BasicBlock(15, 32, stride=2))
self.conv1 = (BasicBlock(32, c, stride=2))
self.conv2 = (BasicBlock(c, 2*c, stride=1))
self.conv3 = (BasicBlock(2*c, 2*c, stride=1))
self.conv4 = (BasicBlock(4*c, 2*c, stride=1))
self.conv5 = (BasicBlock(4*c, 2*c, stride=1))
self.conv6 = (BasicBlock(6*c, 2*c, stride=1))
self.conv7 = (BasicBlock(6*c, 2*c, stride=1))
self.conv8 = (BasicBlock(6*c, 2*c, stride=1))
self.conv9 = (BasicBlock(6*c, 2*c, stride=1))
self.conv10 = (BasicBlock(3*c, 4*c, stride=1))
self.conv11 = addSig(M.Conv2d(c+32, 12, 1))
def forward(self, x):
size = x.shape
x = x.reshape((size[0] * 5, 3) + size[2:])
conv0 = tsm(self.conv0_(x))
conv1 = tsm(self.conv1_(conv0))
#
x = (x.reshape((size[0], 15) + x.shape[2:]))
conv0_ = (conv0.reshape((size[0], 5) + conv0.shape[1:]))[:, 2]
conv1_ = (conv1.reshape((size[0], 5) + conv1.shape[1:]))[:, 2]
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv0 += conv0_
conv1 += conv1_
conv2 = (self.conv2(conv1))
conv3 = (self.conv3(conv2))
conv4 = (self.conv4(F.concat((conv3, conv2), 1)))
conv5 = (self.conv5(F.concat((conv4, conv3), 1)))
conv6 = (self.conv6(F.concat((conv5, conv4, conv2), 1)))
conv7 = (self.conv7( | F.concat((conv6, conv5, conv3), 1) | megengine.functional.concat |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = F.dimshuffle(x, (0, 1, 4, 2, 5, 3))
return x.reshape(shape[0], shape[1] // 4, shape[2]*2, shape[3]*2)
c = 64
class SimpleUNet(M.Module):
def __init__(self):
super().__init__()
self.conv0_ = (BasicBlock(3, 32, stride=2))
self.conv1_ = (BasicBlock(32, c, stride=2))
self.conv0 = (BasicBlock(15, 32, stride=2))
self.conv1 = (BasicBlock(32, c, stride=2))
self.conv2 = (BasicBlock(c, 2*c, stride=1))
self.conv3 = (BasicBlock(2*c, 2*c, stride=1))
self.conv4 = (BasicBlock(4*c, 2*c, stride=1))
self.conv5 = (BasicBlock(4*c, 2*c, stride=1))
self.conv6 = (BasicBlock(6*c, 2*c, stride=1))
self.conv7 = (BasicBlock(6*c, 2*c, stride=1))
self.conv8 = (BasicBlock(6*c, 2*c, stride=1))
self.conv9 = (BasicBlock(6*c, 2*c, stride=1))
self.conv10 = (BasicBlock(3*c, 4*c, stride=1))
self.conv11 = addSig(M.Conv2d(c+32, 12, 1))
def forward(self, x):
size = x.shape
x = x.reshape((size[0] * 5, 3) + size[2:])
conv0 = tsm(self.conv0_(x))
conv1 = tsm(self.conv1_(conv0))
#
x = (x.reshape((size[0], 15) + x.shape[2:]))
conv0_ = (conv0.reshape((size[0], 5) + conv0.shape[1:]))[:, 2]
conv1_ = (conv1.reshape((size[0], 5) + conv1.shape[1:]))[:, 2]
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv0 += conv0_
conv1 += conv1_
conv2 = (self.conv2(conv1))
conv3 = (self.conv3(conv2))
conv4 = (self.conv4(F.concat((conv3, conv2), 1)))
conv5 = (self.conv5(F.concat((conv4, conv3), 1)))
conv6 = (self.conv6(F.concat((conv5, conv4, conv2), 1)))
conv7 = (self.conv7(F.concat((conv6, conv5, conv3), 1)))
conv8 = (self.conv8( | F.concat((conv7, conv6, conv4), 1) | megengine.functional.concat |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = F.dimshuffle(x, (0, 1, 4, 2, 5, 3))
return x.reshape(shape[0], shape[1] // 4, shape[2]*2, shape[3]*2)
c = 64
class SimpleUNet(M.Module):
def __init__(self):
super().__init__()
self.conv0_ = (BasicBlock(3, 32, stride=2))
self.conv1_ = (BasicBlock(32, c, stride=2))
self.conv0 = (BasicBlock(15, 32, stride=2))
self.conv1 = (BasicBlock(32, c, stride=2))
self.conv2 = (BasicBlock(c, 2*c, stride=1))
self.conv3 = (BasicBlock(2*c, 2*c, stride=1))
self.conv4 = (BasicBlock(4*c, 2*c, stride=1))
self.conv5 = (BasicBlock(4*c, 2*c, stride=1))
self.conv6 = (BasicBlock(6*c, 2*c, stride=1))
self.conv7 = (BasicBlock(6*c, 2*c, stride=1))
self.conv8 = (BasicBlock(6*c, 2*c, stride=1))
self.conv9 = (BasicBlock(6*c, 2*c, stride=1))
self.conv10 = (BasicBlock(3*c, 4*c, stride=1))
self.conv11 = addSig(M.Conv2d(c+32, 12, 1))
def forward(self, x):
size = x.shape
x = x.reshape((size[0] * 5, 3) + size[2:])
conv0 = tsm(self.conv0_(x))
conv1 = tsm(self.conv1_(conv0))
#
x = (x.reshape((size[0], 15) + x.shape[2:]))
conv0_ = (conv0.reshape((size[0], 5) + conv0.shape[1:]))[:, 2]
conv1_ = (conv1.reshape((size[0], 5) + conv1.shape[1:]))[:, 2]
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv0 += conv0_
conv1 += conv1_
conv2 = (self.conv2(conv1))
conv3 = (self.conv3(conv2))
conv4 = (self.conv4(F.concat((conv3, conv2), 1)))
conv5 = (self.conv5(F.concat((conv4, conv3), 1)))
conv6 = (self.conv6(F.concat((conv5, conv4, conv2), 1)))
conv7 = (self.conv7(F.concat((conv6, conv5, conv3), 1)))
conv8 = (self.conv8(F.concat((conv7, conv6, conv4), 1)))
conv9 = (self.conv9( | F.concat((conv8, conv7, conv5), 1) | megengine.functional.concat |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = F.dimshuffle(x, (0, 1, 4, 2, 5, 3))
return x.reshape(shape[0], shape[1] // 4, shape[2]*2, shape[3]*2)
c = 64
class SimpleUNet(M.Module):
def __init__(self):
super().__init__()
self.conv0_ = (BasicBlock(3, 32, stride=2))
self.conv1_ = (BasicBlock(32, c, stride=2))
self.conv0 = (BasicBlock(15, 32, stride=2))
self.conv1 = (BasicBlock(32, c, stride=2))
self.conv2 = (BasicBlock(c, 2*c, stride=1))
self.conv3 = (BasicBlock(2*c, 2*c, stride=1))
self.conv4 = (BasicBlock(4*c, 2*c, stride=1))
self.conv5 = (BasicBlock(4*c, 2*c, stride=1))
self.conv6 = (BasicBlock(6*c, 2*c, stride=1))
self.conv7 = (BasicBlock(6*c, 2*c, stride=1))
self.conv8 = (BasicBlock(6*c, 2*c, stride=1))
self.conv9 = (BasicBlock(6*c, 2*c, stride=1))
self.conv10 = (BasicBlock(3*c, 4*c, stride=1))
self.conv11 = addSig(M.Conv2d(c+32, 12, 1))
def forward(self, x):
size = x.shape
x = x.reshape((size[0] * 5, 3) + size[2:])
conv0 = tsm(self.conv0_(x))
conv1 = tsm(self.conv1_(conv0))
#
x = (x.reshape((size[0], 15) + x.shape[2:]))
conv0_ = (conv0.reshape((size[0], 5) + conv0.shape[1:]))[:, 2]
conv1_ = (conv1.reshape((size[0], 5) + conv1.shape[1:]))[:, 2]
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv0 += conv0_
conv1 += conv1_
conv2 = (self.conv2(conv1))
conv3 = (self.conv3(conv2))
conv4 = (self.conv4(F.concat((conv3, conv2), 1)))
conv5 = (self.conv5(F.concat((conv4, conv3), 1)))
conv6 = (self.conv6(F.concat((conv5, conv4, conv2), 1)))
conv7 = (self.conv7(F.concat((conv6, conv5, conv3), 1)))
conv8 = (self.conv8(F.concat((conv7, conv6, conv4), 1)))
conv9 = (self.conv9(F.concat((conv8, conv7, conv5), 1)))
conv10 = subpixel(self.conv10(F.concat((conv9, conv1), 1)))
conv11 = subpixel(self.conv11(F.concat((conv10, conv0), 1)))
conv11 = conv11 * 2 - 1 # sigmoid to [-1, 1]
return F.minimum( | F.maximum(conv11 + x[:, 6:9], 0) | megengine.functional.maximum |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = | M.Conv2d(in_channels, channels, 1, stride, bias=False) | megengine.module.Conv2d |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = F.dimshuffle(x, (0, 1, 4, 2, 5, 3))
return x.reshape(shape[0], shape[1] // 4, shape[2]*2, shape[3]*2)
c = 64
class SimpleUNet(M.Module):
def __init__(self):
super().__init__()
self.conv0_ = (BasicBlock(3, 32, stride=2))
self.conv1_ = (BasicBlock(32, c, stride=2))
self.conv0 = (BasicBlock(15, 32, stride=2))
self.conv1 = (BasicBlock(32, c, stride=2))
self.conv2 = (BasicBlock(c, 2*c, stride=1))
self.conv3 = (BasicBlock(2*c, 2*c, stride=1))
self.conv4 = (BasicBlock(4*c, 2*c, stride=1))
self.conv5 = (BasicBlock(4*c, 2*c, stride=1))
self.conv6 = (BasicBlock(6*c, 2*c, stride=1))
self.conv7 = (BasicBlock(6*c, 2*c, stride=1))
self.conv8 = (BasicBlock(6*c, 2*c, stride=1))
self.conv9 = (BasicBlock(6*c, 2*c, stride=1))
self.conv10 = (BasicBlock(3*c, 4*c, stride=1))
self.conv11 = addSig(M.Conv2d(c+32, 12, 1))
def forward(self, x):
size = x.shape
x = x.reshape((size[0] * 5, 3) + size[2:])
conv0 = tsm(self.conv0_(x))
conv1 = tsm(self.conv1_(conv0))
#
x = (x.reshape((size[0], 15) + x.shape[2:]))
conv0_ = (conv0.reshape((size[0], 5) + conv0.shape[1:]))[:, 2]
conv1_ = (conv1.reshape((size[0], 5) + conv1.shape[1:]))[:, 2]
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv0 += conv0_
conv1 += conv1_
conv2 = (self.conv2(conv1))
conv3 = (self.conv3(conv2))
conv4 = (self.conv4(F.concat((conv3, conv2), 1)))
conv5 = (self.conv5(F.concat((conv4, conv3), 1)))
conv6 = (self.conv6(F.concat((conv5, conv4, conv2), 1)))
conv7 = (self.conv7(F.concat((conv6, conv5, conv3), 1)))
conv8 = (self.conv8(F.concat((conv7, conv6, conv4), 1)))
conv9 = (self.conv9(F.concat((conv8, conv7, conv5), 1)))
conv10 = subpixel(self.conv10( | F.concat((conv9, conv1), 1) | megengine.functional.concat |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
M.Conv2d(in_channels, channels, 1, 1, bias=False)
)
self.fc1 = M.Conv2d(channels, 16, kernel_size=1)
self.fc2 = M.Conv2d(16, channels, kernel_size=1)
self.relu1 = M.LeakyReLU(0.1)
self.relu2 = M.LeakyReLU(0.1)
self.relu3 = M.LeakyReLU(0.1)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.relu1(x)
x = self.conv2(x)
identity = self.downsample(identity)
w = x.mean(3, True).mean(2, True)
w = self.relu2(self.fc1(w))
w = F.sigmoid(self.fc2(w))
x = x * w + identity
x = self.relu3(x)
return x
def subpixel(x):
shape = x.shape
x = x.reshape(shape[0], shape[1] // 4, 2, 2, shape[2], shape[3])
x = F.dimshuffle(x, (0, 1, 4, 2, 5, 3))
return x.reshape(shape[0], shape[1] // 4, shape[2]*2, shape[3]*2)
c = 64
class SimpleUNet(M.Module):
def __init__(self):
super().__init__()
self.conv0_ = (BasicBlock(3, 32, stride=2))
self.conv1_ = (BasicBlock(32, c, stride=2))
self.conv0 = (BasicBlock(15, 32, stride=2))
self.conv1 = (BasicBlock(32, c, stride=2))
self.conv2 = (BasicBlock(c, 2*c, stride=1))
self.conv3 = (BasicBlock(2*c, 2*c, stride=1))
self.conv4 = (BasicBlock(4*c, 2*c, stride=1))
self.conv5 = (BasicBlock(4*c, 2*c, stride=1))
self.conv6 = (BasicBlock(6*c, 2*c, stride=1))
self.conv7 = (BasicBlock(6*c, 2*c, stride=1))
self.conv8 = (BasicBlock(6*c, 2*c, stride=1))
self.conv9 = (BasicBlock(6*c, 2*c, stride=1))
self.conv10 = (BasicBlock(3*c, 4*c, stride=1))
self.conv11 = addSig(M.Conv2d(c+32, 12, 1))
def forward(self, x):
size = x.shape
x = x.reshape((size[0] * 5, 3) + size[2:])
conv0 = tsm(self.conv0_(x))
conv1 = tsm(self.conv1_(conv0))
#
x = (x.reshape((size[0], 15) + x.shape[2:]))
conv0_ = (conv0.reshape((size[0], 5) + conv0.shape[1:]))[:, 2]
conv1_ = (conv1.reshape((size[0], 5) + conv1.shape[1:]))[:, 2]
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv0 += conv0_
conv1 += conv1_
conv2 = (self.conv2(conv1))
conv3 = (self.conv3(conv2))
conv4 = (self.conv4(F.concat((conv3, conv2), 1)))
conv5 = (self.conv5(F.concat((conv4, conv3), 1)))
conv6 = (self.conv6(F.concat((conv5, conv4, conv2), 1)))
conv7 = (self.conv7(F.concat((conv6, conv5, conv3), 1)))
conv8 = (self.conv8(F.concat((conv7, conv6, conv4), 1)))
conv9 = (self.conv9(F.concat((conv8, conv7, conv5), 1)))
conv10 = subpixel(self.conv10(F.concat((conv9, conv1), 1)))
conv11 = subpixel(self.conv11( | F.concat((conv10, conv0), 1) | megengine.functional.concat |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
| M.AvgPool2d(kernel_size=stride, stride=stride) | megengine.module.AvgPool2d |
import megengine as mge
import megengine.module as M
import megengine.functional as F
from megengine.core import Parameter
from utils import *
def addLeakyRelu(x):
return M.Sequential(x, M.LeakyReLU(0.1))
def addSig(x):
return M.Sequential(x, M.Sigmoid())
def up_block(x, ic, oc):
return M.ConvTranspose2d(ic, oc, 4, stride=2, padding=1)
def down_block(x, ic, oc):
return M.Conv2d(ic, oc, 3, padding=1, stride=2)
class BasicBlock(M.Module):
expansion = 1
def __init__(
self,
in_channels,
channels,
stride=1,
groups=1,
base_width=64,
dilation=1,
norm=M.BatchNorm2d,
):
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = M.Conv2d(
in_channels, channels, 3, stride, padding=dilation, bias=True
)
self.conv2 = M.Conv2d(channels, channels, 3, 1, padding=1, bias=True)
if in_channels == channels and stride == 1:
self.downsample = M.Identity()
elif stride == 1:
self.downsample = M.Conv2d(in_channels, channels, 1, stride, bias=False)
else:
self.downsample = M.Sequential(
M.AvgPool2d(kernel_size=stride, stride=stride),
| M.Conv2d(in_channels, channels, 1, 1, bias=False) | megengine.module.Conv2d |
# MegFlow is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2019-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#!/usr/bin/env python
# coding=utf-8
from math import log
from loguru import logger
import megengine as mge
import cv2
import megengine.functional as F
import numpy as np
from .model import Model
if __name__ == "__main__":
import sys
if len(sys.argv) < 5:
print("usage: python3 -m reid_alignedreid/demo reid.pkl positive1.png positive2.png negtive.jpg")
sys.exit(0)
model = Model()
sd = | mge.load(sys.argv[1]) | megengine.load |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
import numpy as np
import pytest
from megengine.core.tensor.dtype import intb1, intb2, intb4
from megengine.tensor import Tensor
def bit_define_test(bit, low_bit_type):
max_value = (1 << bit) - 1
min_value = 1 - (1 << bit)
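    # the low-bit integer types represent the odd integers in [1 - 2**bit, 2**bit - 1]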
a = np.array([i for i in range(min_value, max_value + 2, 2)], dtype=low_bit_type)
for i in range(max_value + 1):
np.testing.assert_equal(a[i], i * 2 - max_value)
np.testing.assert_equal(str(a[i]), str(i * 2 - max_value))
with pytest.raises(ValueError):
np.arange(min_value, max_value, dtype=low_bit_type)
with pytest.raises(ValueError):
np.arange(min_value - 2, max_value + 4, 2, dtype=low_bit_type)
np.testing.assert_allclose(
np.arange(min_value, 12, 2, dtype=low_bit_type),
(np.arange((13 - min_value) // 2, dtype=np.int8) % (max_value + 1)) * 2
- max_value,
)
np.testing.assert_allclose(
np.arange(max_value, max_value - 20, -2, dtype=low_bit_type),
(np.arange(max_value, max_value - 10, -1, dtype=np.int8) % (max_value + 1)) * 2
- max_value,
)
def test_define():
bit_define_test(1, intb1)
bit_define_test(2, intb2)
bit_define_test(4, intb4)
def _bit_cast_test(bit, low_bit_type):
dtypes = [np.int8, np.int16, np.int32, np.float32, np.float64]
max_value = (1 << bit) - 1
min_value = 1 - (1 << bit)
for dtype in dtypes:
np.testing.assert_allclose(
np.arange(min_value, max_value + 2, 2, dtype=low_bit_type).astype(dtype),
np.arange(min_value, max_value + 2, 2, dtype=dtype),
)
with pytest.raises(ValueError):
np.array([2, 1, -1], dtype=int).astype(low_bit_type)
with pytest.raises(ValueError):
np.array([min_value - 2, 1, max_value + 2], dtype=int).astype(low_bit_type)
def test_cast():
_bit_cast_test(1, intb1)
_bit_cast_test(2, intb2)
_bit_cast_test(4, intb4)
def _shared_nd_test(bit, low_bit_type):
max_value = (1 << bit) - 1
min_value = 1 - (1 << bit)
data = np.arange(min_value, max_value + 2, 2, dtype=low_bit_type)
snd = | Tensor(data, dtype=low_bit_type, device="xpux") | megengine.tensor.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
import numpy as np
import pytest
from megengine.core.tensor.dtype import intb1, intb2, intb4
from megengine.tensor import Tensor
def bit_define_test(bit, low_bit_type):
max_value = (1 << bit) - 1
min_value = 1 - (1 << bit)
a = np.array([i for i in range(min_value, max_value + 2, 2)], dtype=low_bit_type)
for i in range(max_value + 1):
np.testing.assert_equal(a[i], i * 2 - max_value)
np.testing.assert_equal(str(a[i]), str(i * 2 - max_value))
with pytest.raises(ValueError):
np.arange(min_value, max_value, dtype=low_bit_type)
with pytest.raises(ValueError):
np.arange(min_value - 2, max_value + 4, 2, dtype=low_bit_type)
np.testing.assert_allclose(
np.arange(min_value, 12, 2, dtype=low_bit_type),
(np.arange((13 - min_value) // 2, dtype=np.int8) % (max_value + 1)) * 2
- max_value,
)
np.testing.assert_allclose(
np.arange(max_value, max_value - 20, -2, dtype=low_bit_type),
(np.arange(max_value, max_value - 10, -1, dtype=np.int8) % (max_value + 1)) * 2
- max_value,
)
def test_define():
bit_define_test(1, intb1)
bit_define_test(2, intb2)
bit_define_test(4, intb4)
def _bit_cast_test(bit, low_bit_type):
dtypes = [np.int8, np.int16, np.int32, np.float32, np.float64]
max_value = (1 << bit) - 1
min_value = 1 - (1 << bit)
for dtype in dtypes:
np.testing.assert_allclose(
np.arange(min_value, max_value + 2, 2, dtype=low_bit_type).astype(dtype),
np.arange(min_value, max_value + 2, 2, dtype=dtype),
)
with pytest.raises(ValueError):
np.array([2, 1, -1], dtype=int).astype(low_bit_type)
with pytest.raises(ValueError):
np.array([min_value - 2, 1, max_value + 2], dtype=int).astype(low_bit_type)
def test_cast():
_bit_cast_test(1, intb1)
_bit_cast_test(2, intb2)
_bit_cast_test(4, intb4)
def _shared_nd_test(bit, low_bit_type):
max_value = (1 << bit) - 1
min_value = 1 - (1 << bit)
data = np.arange(min_value, max_value + 2, 2, dtype=low_bit_type)
snd = Tensor(data, dtype=low_bit_type, device="xpux")
np.testing.assert_allclose(snd.numpy(), range(min_value, max_value + 2, 2))
data = np.arange(min_value, max_value + 2, 4, dtype=low_bit_type)
snd = | Tensor(data, dtype=low_bit_type, device="xpux") | megengine.tensor.Tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: skip-file
import argparse
import sys
# pylint: disable=import-error
import resnet.model as resnet_model
# pylint: disable=import-error
import shufflenet.model as snet_model
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import jit
def dump_static_graph(model, graph_name, shape):
model.eval()
data = mge.Tensor(np.ones(shape, dtype=np.uint8))
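    # capture_as_const freezes model parameters into the traced graph so it can be dumped for inference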
@ | jit.trace(capture_as_const=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: skip-file
import argparse
import sys
# pylint: disable=import-error
import resnet.model as resnet_model
# pylint: disable=import-error
import shufflenet.model as snet_model
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import jit
def dump_static_graph(model, graph_name, shape):
model.eval()
data = mge.Tensor(np.ones(shape, dtype=np.uint8))
@jit.trace(capture_as_const=True)
def pred_func(data):
out = data.astype(np.float32)
output_h, output_w = 224, 224
# resize
print(shape)
M = mge.tensor(np.array([[1,0,0], [0,1,0], [0,0,1]], dtype=np.float32))
M_shape = | F.concat([data.shape[0],M.shape]) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: skip-file
import argparse
import sys
# pylint: disable=import-error
import resnet.model as resnet_model
# pylint: disable=import-error
import shufflenet.model as snet_model
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import jit
def dump_static_graph(model, graph_name, shape):
model.eval()
data = mge.Tensor(np.ones(shape, dtype=np.uint8))
@jit.trace(capture_as_const=True)
def pred_func(data):
out = data.astype(np.float32)
output_h, output_w = 224, 224
# resize
print(shape)
M = mge.tensor(np.array([[1,0,0], [0,1,0], [0,0,1]], dtype=np.float32))
M_shape = F.concat([data.shape[0],M.shape])
M = | F.broadcast_to(M, M_shape) | megengine.functional.broadcast_to |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: skip-file
import argparse
import sys
# pylint: disable=import-error
import resnet.model as resnet_model
# pylint: disable=import-error
import shufflenet.model as snet_model
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import jit
def dump_static_graph(model, graph_name, shape):
model.eval()
data = mge.Tensor(np.ones(shape, dtype=np.uint8))
@jit.trace(capture_as_const=True)
def pred_func(data):
out = data.astype(np.float32)
output_h, output_w = 224, 224
# resize
print(shape)
M = mge.tensor(np.array([[1,0,0], [0,1,0], [0,0,1]], dtype=np.float32))
M_shape = F.concat([data.shape[0],M.shape])
M = F.broadcast_to(M, M_shape)
out = | F.vision.warp_perspective(out, M, (output_h, output_w), format='NHWC') | megengine.functional.vision.warp_perspective |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: skip-file
import argparse
import sys
# pylint: disable=import-error
import resnet.model as resnet_model
# pylint: disable=import-error
import shufflenet.model as snet_model
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import jit
def dump_static_graph(model, graph_name, shape):
model.eval()
data = mge.Tensor(np.ones(shape, dtype=np.uint8))
@jit.trace(capture_as_const=True)
def pred_func(data):
out = data.astype(np.float32)
output_h, output_w = 224, 224
# resize
print(shape)
M = mge.tensor(np.array([[1,0,0], [0,1,0], [0,0,1]], dtype=np.float32))
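        # broadcast the 3x3 identity homography to one matrix per batch sample: (N, 3, 3)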
M_shape = F.concat([data.shape[0],M.shape])
M = F.broadcast_to(M, M_shape)
out = F.vision.warp_perspective(out, M, (output_h, output_w), format='NHWC')
# mean
_mean = mge.Tensor(np.array([103.530, 116.280, 123.675], dtype=np.float32))
out = | F.sub(out, _mean) | megengine.functional.sub |