# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
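# Modules with learnable parameters are built once and cached so that the
# timed calls only measure forward execution, not construction or parameter
# allocation. The torch modules are placed on CUDA explicitly; the MegEngine
# modules run on the framework's default device (typically the GPU when one
# is available).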
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
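
# The benchmark driver is not included in this excerpt. The two helpers below
# are a minimal, hypothetical sketch of how a table like `test_cases` (defined
# next) could be consumed; the names `run_case` and `main` are illustrative
# and not taken from the original script. Accurate GPU timings would also
# synchronize the device (e.g. torch.cuda.synchronize()) before reading the
# clock, which is omitted here for brevity.
def run_case(mge_fn, torch_fn, shapes, unpack_inps, rep):
    # Build one random float32 input per requested shape for each framework.
    arrays = [np.random.randn(*shape).astype("float32") for shape in shapes]
    mge_inps = [mge.tensor(a) for a in arrays]
    torch_inps = [torch.tensor(a, device="cuda") for a in arrays]

    def bench(fn, inps):
        call = (lambda: fn(*inps)) if unpack_inps else (lambda: fn(inps))
        call()  # warm up so one-time initialization is not measured
        start = time.perf_counter()
        for _ in range(rep):
            call()
        return (time.perf_counter() - start) / rep * 1e6  # microseconds per call

    return bench(mge_fn, mge_inps), bench(torch_fn, torch_inps)


def main():
    # Only the small input shapes are timed in this sketch.
    rows = []
    for name, mge_fn, torch_fn, small_shapes, _large_shapes, unpack_inps, rep in test_cases:
        mge_us, torch_us = run_case(mge_fn, torch_fn, small_shapes, unpack_inps, rep)
        rows.append((name, mge_us, torch_us))
    print(tabulate(rows, headers=["op", "mge (us)", "torch (us)"], floatfmt=".3f"))
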
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
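    # Column meanings, as consumed by the `run_case` sketch above:
    #   small inps / large inps - input shapes for a light and a heavy workload
    #   unpack_inps - True: pass the generated tensors as separate positional
    #       arguments; False: pass them as one sequence (e.g. concat/cat)
    #   rep - number of timed repetitions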
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: MF.avg_pool2d(x, 2),
lambda x: TF.avg_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"broadcast",
lambda x: MF.broadcast_to(x, (5,) + x.shape),
lambda x: torch.broadcast_to(x, (5,) + x.shape),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"batchedmatmul",
MF.matmul,
torch.matmul,
[(8, 64, 32), (8, 32, 64)],
[(8, 2048, 512), (8, 512, 2048)],
True,
1000,
),
(
"batchnrom2d",
lambda x: module_cache["BatchNorm2d"][0](x),
lambda x: module_cache["BatchNorm2d"][1](x),
[(2, 64, 16, 16)],
[(64, 64, 128, 128)],
True,
1000,
),
(
"concat",
MF.concat,
torch.cat,
[(20, 100), (50, 100), (30, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16), (64, 512, 16, 16)],
False,
1000,
),
(
"conv2d",
lambda x: module_cache["conv2d"][0](x),
lambda x: module_cache["conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"conv3d",
lambda x: module_cache["conv3d"][0](x),
lambda x: module_cache["conv3d"][1](x),
[(2, 32, 8, 8, 8)],
[(32, 32, 16, 16, 16)],
True,
1000,
),
(
"convTranspose2d",
lambda x: module_cache["ConvTranspose2d"][0](x),
lambda x: module_cache["ConvTranspose2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"dropout",
lambda x: MF.dropout(x, 0.5),
TF.dropout,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"dw_conv2d",
lambda x: module_cache["dw_conv2d"][0](x),
lambda x: module_cache["dw_conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"elemwise.unary",
MF.log,
torch.log,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"elemwise.binary",
MF.add,
torch.add,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
True,
1000,
),
(
"expand_dims",
lambda x: MF.expand_dims(x, 0),
lambda x: torch.unsqueeze(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("gelu", MF.gelu, TF.gelu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("hswish", MF.hswish, TF.hardswish, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"hsigmoid",
MF.hsigmoid,
TF.hardsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("isinf", MF.isinf, torch.isinf, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"indeixngMultiAxisVec",
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
[(10, 10, 10, 10)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"logsigmoid",
MF.logsigmoid,
TF.logsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"leaky_relu",
lambda x: MF.leaky_relu(x, 0.5),
lambda x: TF.leaky_relu(x, 0.5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"linear",
lambda x: module_cache["Linear"][0](x),
lambda x: module_cache["Linear"][1](x),
[(10, 1000)],
[(64, 128, 1000)],
True,
1000,
),
("matinv", MF.matinv, torch.inverse, [(10, 10)], [(30, 30)], True, 1000),
(
"matmul",
MF.matmul,
torch.matmul,
[(64, 32), (32, 64)],
[(2048, 1024), (1024, 2048)],
True,
1000,
),
(
"max_pool2d",
lambda x: MF.max_pool2d(x, 2),
lambda x: TF.max_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"normal",
lambda x: mge.random.normal(0, 1, x.shape),
lambda x: torch.randn(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"prelu",
MF.prelu,
TF.prelu,
[(100, 100), (1,)],
[(64, 512, 16, 16), (1,)],
True,
1000,
),
(
"reduce.max",
lambda x: MF.max(x, 0),
lambda x: torch.max(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("relu", MF.relu, TF.relu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("relu6", MF.relu6, TF.relu6, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"repeat",
lambda x: MF.repeat(x, 5),
lambda x: torch.repeat_interleave(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("silu", MF.silu, TF.silu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"split",
lambda x: MF.split(x, 5),
lambda x: torch.split(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("sigmoid", MF.sigmoid, TF.sigmoid, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"softmax",
lambda x: MF.softmax(x, axis=1),
lambda x: TF.softmax(x, dim=1),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"softplus",
MF.softplus,
TF.softplus,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"squeeze",
lambda x: MF.squeeze(x, 0),
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
# (mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: MF.avg_pool2d(x, 2),
lambda x: TF.avg_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"broadcast",
lambda x: MF.broadcast_to(x, (5,) + x.shape),
lambda x: torch.broadcast_to(x, (5,) + x.shape),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"batchedmatmul",
MF.matmul,
torch.matmul,
[(8, 64, 32), (8, 32, 64)],
[(8, 2048, 512), (8, 512, 2048)],
True,
1000,
),
(
"batchnrom2d",
lambda x: module_cache["BatchNorm2d"][0](x),
lambda x: module_cache["BatchNorm2d"][1](x),
[(2, 64, 16, 16)],
[(64, 64, 128, 128)],
True,
1000,
),
(
"concat",
MF.concat,
torch.cat,
[(20, 100), (50, 100), (30, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16), (64, 512, 16, 16)],
False,
1000,
),
(
"conv2d",
lambda x: module_cache["conv2d"][0](x),
lambda x: module_cache["conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"conv3d",
lambda x: module_cache["conv3d"][0](x),
lambda x: module_cache["conv3d"][1](x),
[(2, 32, 8, 8, 8)],
[(32, 32, 16, 16, 16)],
True,
1000,
),
(
"convTranspose2d",
lambda x: module_cache["ConvTranspose2d"][0](x),
lambda x: module_cache["ConvTranspose2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"dropout",
lambda x: MF.dropout(x, 0.5),
TF.dropout,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"dw_conv2d",
lambda x: module_cache["dw_conv2d"][0](x),
lambda x: module_cache["dw_conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"elemwise.unary",
MF.log,
torch.log,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"elemwise.binary",
MF.add,
torch.add,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
True,
1000,
),
(
"expand_dims",
lambda x: MF.expand_dims(x, 0),
lambda x: torch.unsqueeze(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("gelu", MF.gelu, TF.gelu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("hswish", MF.hswish, TF.hardswish, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"hsigmoid",
MF.hsigmoid,
TF.hardsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("isinf", MF.isinf, torch.isinf, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"indeixngMultiAxisVec",
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
[(10, 10, 10, 10)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"logsigmoid",
MF.logsigmoid,
TF.logsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"leaky_relu",
lambda x: MF.leaky_relu(x, 0.5),
lambda x: TF.leaky_relu(x, 0.5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"linear",
lambda x: module_cache["Linear"][0](x),
lambda x: module_cache["Linear"][1](x),
[(10, 1000)],
[(64, 128, 1000)],
True,
1000,
),
("matinv", MF.matinv, torch.inverse, [(10, 10)], [(30, 30)], True, 1000),
(
"matmul",
MF.matmul,
torch.matmul,
[(64, 32), (32, 64)],
[(2048, 1024), (1024, 2048)],
True,
1000,
),
(
"max_pool2d",
lambda x: MF.max_pool2d(x, 2),
lambda x: TF.max_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"normal",
lambda x: mge.random.normal(0, 1, x.shape),
lambda x: torch.randn(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"prelu",
MF.prelu,
TF.prelu,
[(100, 100), (1,)],
[(64, 512, 16, 16), (1,)],
True,
1000,
),
(
"reduce.max",
lambda x: MF.max(x, 0),
lambda x: torch.max(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("relu", MF.relu, TF.relu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("relu6", MF.relu6, TF.relu6, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"repeat",
lambda x: MF.repeat(x, 5),
lambda x: torch.repeat_interleave(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("silu", MF.silu, TF.silu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"split",
lambda x: MF.split(x, 5),
lambda x: torch.split(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("sigmoid", MF.sigmoid, TF.sigmoid, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"softmax",
lambda x: MF.softmax(x, axis=1),
lambda x: TF.softmax(x, dim=1),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"softplus",
MF.softplus,
TF.softplus,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"squeeze",
lambda x: MF.squeeze(x, 0),
lambda x: torch.squeeze(x, 0),
[(1, 100, 100)],
[(1, 64, 512, 16, 16)],
True,
1000,
),
(
"stack",
MF.stack,
torch.stack,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
False,
10000,
),
(
"subtensor",
lambda x: x[0:20, 10:60],
lambda x: x[0:20, 10:60],
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"topk",
lambda x: | MF.topk(x, 10) | megengine.functional.topk |
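# The two shape columns probe different regimes: the small inputs (e.g.
# (2, 32, 16, 16)) largely measure per-op dispatch overhead, while the large
# inputs (e.g. (64, 512, 16, 16)) are dominated by kernel execution time.
# A rough sense of the size gap, using only the NumPy import above:
print(int(np.prod((64, 512, 16, 16)) / np.prod((2, 32, 16, 16))))  # 512x more elements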
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
    # (name, mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: MF.avg_pool2d(x, 2),
lambda x: TF.avg_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"broadcast",
lambda x: MF.broadcast_to(x, (5,) + x.shape),
lambda x: torch.broadcast_to(x, (5,) + x.shape),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"batchedmatmul",
MF.matmul,
torch.matmul,
[(8, 64, 32), (8, 32, 64)],
[(8, 2048, 512), (8, 512, 2048)],
True,
1000,
),
(
"batchnrom2d",
lambda x: module_cache["BatchNorm2d"][0](x),
lambda x: module_cache["BatchNorm2d"][1](x),
[(2, 64, 16, 16)],
[(64, 64, 128, 128)],
True,
1000,
),
(
"concat",
MF.concat,
torch.cat,
[(20, 100), (50, 100), (30, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16), (64, 512, 16, 16)],
False,
1000,
),
(
"conv2d",
lambda x: module_cache["conv2d"][0](x),
lambda x: module_cache["conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"conv3d",
lambda x: module_cache["conv3d"][0](x),
lambda x: module_cache["conv3d"][1](x),
[(2, 32, 8, 8, 8)],
[(32, 32, 16, 16, 16)],
True,
1000,
),
(
"convTranspose2d",
lambda x: module_cache["ConvTranspose2d"][0](x),
lambda x: module_cache["ConvTranspose2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"dropout",
lambda x: MF.dropout(x, 0.5),
TF.dropout,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"dw_conv2d",
lambda x: module_cache["dw_conv2d"][0](x),
lambda x: module_cache["dw_conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"elemwise.unary",
MF.log,
torch.log,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"elemwise.binary",
MF.add,
torch.add,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
True,
1000,
),
(
"expand_dims",
lambda x: MF.expand_dims(x, 0),
lambda x: torch.unsqueeze(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("gelu", MF.gelu, TF.gelu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("hswish", MF.hswish, TF.hardswish, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"hsigmoid",
MF.hsigmoid,
TF.hardsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("isinf", MF.isinf, torch.isinf, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"indeixngMultiAxisVec",
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
[(10, 10, 10, 10)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"logsigmoid",
MF.logsigmoid,
TF.logsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"leaky_relu",
lambda x: MF.leaky_relu(x, 0.5),
lambda x: TF.leaky_relu(x, 0.5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"linear",
lambda x: module_cache["Linear"][0](x),
lambda x: module_cache["Linear"][1](x),
[(10, 1000)],
[(64, 128, 1000)],
True,
1000,
),
("matinv", MF.matinv, torch.inverse, [(10, 10)], [(30, 30)], True, 1000),
(
"matmul",
MF.matmul,
torch.matmul,
[(64, 32), (32, 64)],
[(2048, 1024), (1024, 2048)],
True,
1000,
),
(
"max_pool2d",
lambda x: MF.max_pool2d(x, 2),
lambda x: TF.max_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"normal",
lambda x: mge.random.normal(0, 1, x.shape),
lambda x: torch.randn(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"prelu",
MF.prelu,
TF.prelu,
[(100, 100), (1,)],
[(64, 512, 16, 16), (1,)],
True,
1000,
),
(
"reduce.max",
lambda x: MF.max(x, 0),
lambda x: torch.max(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("relu", MF.relu, TF.relu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("relu6", MF.relu6, TF.relu6, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"repeat",
lambda x: MF.repeat(x, 5),
lambda x: torch.repeat_interleave(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("silu", MF.silu, TF.silu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"split",
lambda x: MF.split(x, 5),
lambda x: torch.split(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("sigmoid", MF.sigmoid, TF.sigmoid, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"softmax",
lambda x: MF.softmax(x, axis=1),
lambda x: TF.softmax(x, dim=1),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"softplus",
MF.softplus,
TF.softplus,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"squeeze",
lambda x: MF.squeeze(x, 0),
lambda x: torch.squeeze(x, 0),
[(1, 100, 100)],
[(1, 64, 512, 16, 16)],
True,
1000,
),
(
"stack",
MF.stack,
torch.stack,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
False,
10000,
),
(
"subtensor",
lambda x: x[0:20, 10:60],
lambda x: x[0:20, 10:60],
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"topk",
lambda x: MF.topk(x, 10),
lambda x: torch.topk(x, 10),
[(100, 100)],
[(1000, 1000)],
True,
1000,
),
(
"tile",
lambda x: MF.tile(x, (2,) * len(x.shape)),
lambda x: torch.tile(x, (2,) * len(x.shape)),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"transpose",
lambda x: MF.transpose(x, list(range(len(x.shape)))[::-1]),
lambda x: torch.permute(x, list(range(len(x.shape)))[::-1]),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"where",
lambda x: | MF.where(x > 0.5, x, x) | megengine.functional.where |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as TF
from tabulate import tabulate
import megengine as mge
import megengine.functional as MF
import megengine.module as MM
module_cache = {
"conv2d": (MM.Conv2d(32, 32, 3, 1, 0), nn.Conv2d(32, 32, 3, 1, 0).cuda()),
"dw_conv2d": (
MM.Conv2d(32, 32, 3, 1, 0, groups=32),
nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
),
"conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
"ConvTranspose2d": (
MM.ConvTranspose2d(32, 32, 3, 1, 0),
nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
),
"BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
"Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}
test_cases = [
    # (name, mge op, torch op, small inps, large inps, unpack_inps, rep)
(
"adaptive_avg_pool2d",
lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"adaptive_max_pool2d",
lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
("argsort", MF.argsort, torch.argsort, [(1000,)], [(1000, 1000),], True, 1000),
(
"avg_pool2d",
lambda x: MF.avg_pool2d(x, 2),
lambda x: TF.avg_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"broadcast",
lambda x: MF.broadcast_to(x, (5,) + x.shape),
lambda x: torch.broadcast_to(x, (5,) + x.shape),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"batchedmatmul",
MF.matmul,
torch.matmul,
[(8, 64, 32), (8, 32, 64)],
[(8, 2048, 512), (8, 512, 2048)],
True,
1000,
),
(
"batchnrom2d",
lambda x: module_cache["BatchNorm2d"][0](x),
lambda x: module_cache["BatchNorm2d"][1](x),
[(2, 64, 16, 16)],
[(64, 64, 128, 128)],
True,
1000,
),
(
"concat",
MF.concat,
torch.cat,
[(20, 100), (50, 100), (30, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16), (64, 512, 16, 16)],
False,
1000,
),
(
"conv2d",
lambda x: module_cache["conv2d"][0](x),
lambda x: module_cache["conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"conv3d",
lambda x: module_cache["conv3d"][0](x),
lambda x: module_cache["conv3d"][1](x),
[(2, 32, 8, 8, 8)],
[(32, 32, 16, 16, 16)],
True,
1000,
),
(
"convTranspose2d",
lambda x: module_cache["ConvTranspose2d"][0](x),
lambda x: module_cache["ConvTranspose2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"dropout",
lambda x: MF.dropout(x, 0.5),
TF.dropout,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"dw_conv2d",
lambda x: module_cache["dw_conv2d"][0](x),
lambda x: module_cache["dw_conv2d"][1](x),
[(2, 32, 16, 16)],
[(32, 32, 128, 128)],
True,
1000,
),
(
"elemwise.unary",
MF.log,
torch.log,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"elemwise.binary",
MF.add,
torch.add,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
True,
1000,
),
(
"expand_dims",
lambda x: MF.expand_dims(x, 0),
lambda x: torch.unsqueeze(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("gelu", MF.gelu, TF.gelu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("hswish", MF.hswish, TF.hardswish, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"hsigmoid",
MF.hsigmoid,
TF.hardsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("isinf", MF.isinf, torch.isinf, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"indeixngMultiAxisVec",
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
lambda x: x[[1, 3, 5], [1, 3, 5], [1, 3, 5], [1, 3, 5]],
[(10, 10, 10, 10)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"logsigmoid",
MF.logsigmoid,
TF.logsigmoid,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"leaky_relu",
lambda x: MF.leaky_relu(x, 0.5),
lambda x: TF.leaky_relu(x, 0.5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"linear",
lambda x: module_cache["Linear"][0](x),
lambda x: module_cache["Linear"][1](x),
[(10, 1000)],
[(64, 128, 1000)],
True,
1000,
),
("matinv", MF.matinv, torch.inverse, [(10, 10)], [(30, 30)], True, 1000),
(
"matmul",
MF.matmul,
torch.matmul,
[(64, 32), (32, 64)],
[(2048, 1024), (1024, 2048)],
True,
1000,
),
(
"max_pool2d",
lambda x: MF.max_pool2d(x, 2),
lambda x: TF.max_pool2d(x, 2),
[(2, 32, 16, 16)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"normal",
lambda x: mge.random.normal(0, 1, x.shape),
lambda x: torch.randn(x.shape, device="cuda"),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"prelu",
MF.prelu,
TF.prelu,
[(100, 100), (1,)],
[(64, 512, 16, 16), (1,)],
True,
1000,
),
(
"reduce.max",
lambda x: MF.max(x, 0),
lambda x: torch.max(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"reduce.mean",
lambda x: MF.mean(x, 0),
lambda x: torch.mean(x, 0),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("relu", MF.relu, TF.relu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
("relu6", MF.relu6, TF.relu6, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"repeat",
lambda x: MF.repeat(x, 5),
lambda x: torch.repeat_interleave(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("silu", MF.silu, TF.silu, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"split",
lambda x: MF.split(x, 5),
lambda x: torch.split(x, 5),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
("sigmoid", MF.sigmoid, TF.sigmoid, [(100, 100)], [(64, 512, 16, 16)], True, 1000),
(
"softmax",
lambda x: MF.softmax(x, axis=1),
lambda x: TF.softmax(x, dim=1),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"softplus",
MF.softplus,
TF.softplus,
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"squeeze",
lambda x: MF.squeeze(x, 0),
lambda x: torch.squeeze(x, 0),
[(1, 100, 100)],
[(1, 64, 512, 16, 16)],
True,
1000,
),
(
"stack",
MF.stack,
torch.stack,
[(100, 100), (100, 100)],
[(64, 512, 16, 16), (64, 512, 16, 16)],
False,
10000,
),
(
"subtensor",
lambda x: x[0:20, 10:60],
lambda x: x[0:20, 10:60],
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"topk",
lambda x: MF.topk(x, 10),
lambda x: torch.topk(x, 10),
[(100, 100)],
[(1000, 1000)],
True,
1000,
),
(
"tile",
lambda x: MF.tile(x, (2,) * len(x.shape)),
lambda x: torch.tile(x, (2,) * len(x.shape)),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"transpose",
lambda x: MF.transpose(x, list(range(len(x.shape)))[::-1]),
lambda x: torch.permute(x, list(range(len(x.shape)))[::-1]),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"where",
lambda x: MF.where(x > 0.5, x, x),
lambda x: torch.where(x > 0.5, x, x),
[(100, 100)],
[(64, 512, 16, 16)],
True,
1000,
),
(
"uniform",
lambda x: | mge.random.uniform(0, 1, x.shape) | megengine.random.uniform |
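# Stateful layers (conv2d, dw_conv2d, conv3d, ConvTranspose2d, BatchNorm2d,
# Linear) are pulled from module_cache rather than built inside the lambdas, so
# parameter construction and the .cuda() transfer of the PyTorch modules happen
# once, outside anything being timed, and both frameworks run identically
# configured layers. For example, the pair behind the "linear" row above:
mge_linear, torch_linear = module_cache["Linear"]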
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
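# dist.launcher(n_gpus=2) wraps the nested worker so that a single call such as
# worker(data, expect) spawns one process per GPU, runs the body in every
# process with the same arguments, and lets each process identify itself via
# dist.get_rank(); that is why the run_* helpers index data[rank] inside the
# worker. A minimal sketch of the pattern with a trivial body (an illustrative
# example, not one of the tests in this file):
def run_rank_check():
    @dist.launcher(n_gpus=2)
    def worker():
        assert dist.get_rank() in (0, 1)
    worker()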
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
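# reduce_scatter_sum sums the per-rank tensors elementwise and then scatters the
# result along the first axis, so with two ranks each worker receives one half
# of x + y, matching expect = (z[: shape[0] // 2], z[shape[0] // 2 :]) in the
# body below. A quick NumPy illustration:
rs_x = np.full((8, 10), 1.0, dtype="float32")
rs_y = np.full((8, 10), 2.0, dtype="float32")
rs_z = rs_x + rs_y                            # every element is 3.0
rank0_half, rank1_half = rs_z[:4], rs_z[4:]   # (4, 10) each, one half per rank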
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
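# all_reduce_max and all_reduce_min combine the per-rank tensors elementwise and
# return the same result on every rank (expect = (z, z)), unlike reduce_sum,
# where only rank 0 receives a value. The tests assert against the NumPy
# elementwise equivalents:
ar_a = np.array([1.0, 5.0, 3.0], dtype="float32")
ar_b = np.array([4.0, 2.0, 6.0], dtype="float32")
print(np.maximum(ar_a, ar_b))  # [4. 5. 6.] -> every rank's all_reduce_max result
print(np.minimum(ar_a, ar_b))  # [1. 2. 3.] -> every rank's all_reduce_min result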
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@ | dist.launcher(n_gpus=2) | megengine.distributed.launcher |
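# gather, like reduce_sum earlier, is rooted at rank 0: only the root receives
# the concatenated tensor and every other rank gets None, which is why the
# worker branches on rank before asserting. The expected value is simply the
# NumPy concatenation of the per-rank inputs along axis 0:
g0 = np.ones((2, 3), dtype="float32")
g1 = np.zeros((2, 3), dtype="float32")
print(np.concatenate((g0, g1)).shape)  # (4, 3) -> what rank 0 receives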
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@
| dist.launcher(n_gpus=2) | megengine.distributed.launcher
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@
| dist.launcher(n_gpus=2) | megengine.distributed.launcher
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(val, shape):
rank = dist.get_rank()
if rank == 0: # remote send
x = tensor(val, device="xpu0")
remote_send(x, 1)
sync()
else: # remote recv
y = remote_recv(0)
assert y.device == get_default_device()
np.testing.assert_almost_equal(val, y.numpy())
val = np.random.random_sample(shape).astype(dtype)
worker(val, shape)
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
@pytest.mark.parametrize("shape", [(), (1,), (4, 5)], ids=str)
def test_io_remote_multishape(shape):
run_io_remote(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
def test_io_remote_multidtype(dtype):
run_io_remote((8, 10), dtype)
@pytest.mark.require_ngpu(2)
def test_cuda_init_before_fork():
a =
| mge.tensor(1, device="gpu0") | megengine.tensor
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(val, shape):
rank = dist.get_rank()
if rank == 0: # remote send
x = tensor(val, device="xpu0")
remote_send(x, 1)
sync()
else: # remote recv
y = remote_recv(0)
assert y.device == get_default_device()
np.testing.assert_almost_equal(val, y.numpy())
val = np.random.random_sample(shape).astype(dtype)
worker(val, shape)
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
@pytest.mark.parametrize("shape", [(), (1,), (4, 5)], ids=str)
def test_io_remote_multishape(shape):
run_io_remote(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
def test_io_remote_multidtype(dtype):
run_io_remote((8, 10), dtype)
@pytest.mark.require_ngpu(2)
def test_cuda_init_before_fork():
a = mge.tensor(1, device="gpu0")
@
| dist.launcher(n_gpus=2) | megengine.distributed.launcher
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank =
| dist.get_rank() | megengine.distributed.get_rank
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp =
| tensor(data[rank]) | megengine.tensor
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output =
| reduce_sum(inp) | megengine.functional.distributed.reduce_sum
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank =
| dist.get_rank() | megengine.distributed.get_rank
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp =
| tensor(data[rank]) | megengine.tensor
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output =
| broadcast(inp) | megengine.functional.distributed.broadcast
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank =
| dist.get_rank() | megengine.distributed.get_rank
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp =
| tensor(data[rank]) | megengine.tensor
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output =
| all_gather(inp) | megengine.functional.distributed.all_gather
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank =
| dist.get_rank() | megengine.distributed.get_rank
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = | tensor(data[rank]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = | reduce_scatter_sum(inp) | megengine.functional.distributed.reduce_scatter_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
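    # reduce_scatter_sum adds the tensors across ranks, then splits the sum along axis 0:
    # rank 0 receives the first half, rank 1 the second half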
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = | tensor(data[rank]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = | all_reduce_sum(inp) | megengine.functional.distributed.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
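    # all_reduce_sum leaves the elementwise sum of all inputs on every rank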
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = | tensor(data[rank]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = | all_reduce_max(inp) | megengine.functional.distributed.all_reduce_max |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
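    # all_reduce_max leaves the elementwise maximum across ranks on every rank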
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = | tensor(data[rank]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = | all_reduce_min(inp) | megengine.functional.distributed.all_reduce_min |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
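# The all_reduce_* collectives exercised below (sum, max, min) return the fully
# reduced tensor on every rank, so both workers share the same expectation.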
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
        rank = dist.get_rank()
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
        inp = tensor(data[rank])
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
        output = gather(inp)
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
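        # gather, like reduce_sum, is rooted at rank 0: the root receives the
        # concatenated tensors and the other rank receives None.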
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
        rank = dist.get_rank()
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
        inp = tensor(data[rank])
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
        output = scatter(inp)
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
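        # scatter splits rank 0's tensor along axis 0 and hands one chunk to
        # each rank; rank 1's own input is ignored.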
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
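    # all_to_all splits each rank's tensor along axis 0 and exchanges the
    # chunks, so every rank receives one piece from every rank.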
@dist.launcher(n_gpus=2)
def worker(data, expect):
        rank = dist.get_rank()
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
        inp = tensor(data[rank])
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = | all_to_all(inp) | megengine.functional.distributed.all_to_all |
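# The all_to_all expectation used in these tests can be reproduced with plain NumPy
# (no GPU needed): with two ranks, rank 0 ends up with the first halves of both
# inputs and rank 1 with the second halves. A small illustrative reference, not
# MegEngine code:
import numpy as np

def all_to_all_reference(x, y):
    half = x.shape[0] // 2
    rank0 = np.concatenate((x[:half], y[:half]))  # received by rank 0
    rank1 = np.concatenate((x[half:], y[half:]))  # received by rank 1
    return rank0, rank1

x = np.arange(8, dtype="float32").reshape(4, 2)
y = x + 100
a, b = all_to_all_reference(x, y)
assert a.shape == x.shape and b.shape == y.shape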
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(val, shape):
rank = | dist.get_rank() | megengine.distributed.get_rank |
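# Unlike the collectives, the remote send/recv test in the rows below is point-to-point:
# rank 0 sends and rank 1 receives, so only the receiver has an output to verify. A
# hedged sketch of that pattern (assumes two visible GPUs; the call is left commented
# out because it spawns worker processes):
import numpy as np
import megengine.distributed as dist
from megengine import tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device
from megengine.functional.distributed import remote_recv, remote_send

@dist.launcher(n_gpus=2)
def p2p_worker(val):
    if dist.get_rank() == 0:
        remote_send(tensor(val, device="xpu0"), 1)  # send to rank 1
        sync()  # the original test syncs after the send
    else:
        y = remote_recv(0)  # receive from rank 0
        assert y.device == get_default_device()
        np.testing.assert_almost_equal(val, y.numpy())

# p2p_worker(np.random.random_sample((4, 5)).astype("float32"))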
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(val, shape):
rank = dist.get_rank()
if rank == 0: # remote send
x = tensor(val, device="xpu0")
remote_send(x, 1)
sync()
else: # remote recv
y = remote_recv(0)
assert y.device == get_default_device()
np.testing.assert_almost_equal(val, y.numpy())
val = np.random.random_sample(shape).astype(dtype)
worker(val, shape)
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
@pytest.mark.parametrize("shape", [(), (1,), (4, 5)], ids=str)
def test_io_remote_multishape(shape):
run_io_remote(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
def test_io_remote_multidtype(dtype):
run_io_remote((8, 10), dtype)
@pytest.mark.require_ngpu(2)
def test_cuda_init_before_fork():
a = mge.tensor(1, device="gpu0")
@dist.launcher(n_gpus=2)
def worker():
a += 1
b = | mge.tensor(2) | megengine.tensor |
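# Several expectations above split along the first axis: reduce_scatter_sum gives each
# rank half of the elementwise sum, and scatter gives each rank half of rank 0's
# tensor. A plain-NumPy restatement of those expectations (illustrative only):
import numpy as np

def reduce_scatter_sum_reference(x, y):
    z = x + y
    half = z.shape[0] // 2
    return z[:half], z[half:]  # (rank 0 output, rank 1 output)

def scatter_reference(x):
    half = x.shape[0] // 2
    return x[:half], x[half:]  # rank 0 keeps the front half, rank 1 gets the back half

x = np.ones((8, 10), dtype="float32")
y = np.full((8, 10), 2, dtype="float32")
r0, r1 = reduce_scatter_sum_reference(x, y)
assert r0.shape == (4, 10) and np.all(r1 == 3)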
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(val, shape):
rank = dist.get_rank()
if rank == 0: # remote send
x = | tensor(val, device="xpu0") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(val, shape):
rank = dist.get_rank()
if rank == 0: # remote send
x = tensor(val, device="xpu0")
| remote_send(x, 1) | megengine.functional.distributed.remote_send |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(val, shape):
rank = dist.get_rank()
if rank == 0: # remote send
x = tensor(val, device="xpu0")
remote_send(x, 1)
| sync() | megengine.core._imperative_rt.core2.sync |
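# The gather/all_gather expectations are simply a concatenation of the per-rank inputs
# along the first axis; for gather only rank 0 receives a tensor and the other rank
# gets None. A one-line NumPy restatement (illustrative only):
import numpy as np

x = np.zeros((2, 3), dtype="float32")
y = np.ones((2, 3), dtype="float32")
gathered = np.concatenate((x, y))  # what rank 0 should see; the other rank sees None
assert gathered.shape == (4, 3)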
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(val, shape):
rank = dist.get_rank()
if rank == 0: # remote send
x = tensor(val, device="xpu0")
remote_send(x, 1)
sync()
else: # remote recv
y = | remote_recv(0) | megengine.functional.distributed.remote_recv |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine import Parameter, tensor
from megengine.core._imperative_rt.core2 import sync
from megengine.device import get_default_device, set_default_device
from megengine.functional.distributed import (
all_gather,
all_reduce_max,
all_reduce_min,
all_reduce_sum,
all_to_all,
broadcast,
gather,
reduce_scatter_sum,
reduce_sum,
remote_recv,
remote_send,
scatter,
)
def run_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_sum(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multishape(shape):
run_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_sum_multidtype(dtype):
run_reduce_sum((8, 10), dtype)
def run_broadcast(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = broadcast(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x, x)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multishape(shape):
run_broadcast(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_broadcast_multidtype(dtype):
run_broadcast((8, 10), dtype)
def run_all_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_gather(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multishape(shape):
run_all_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_gather_multidtype(dtype):
run_all_gather((8, 10), dtype)
def run_reduce_scatter_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = reduce_scatter_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z[: shape[0] // 2], z[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (88, 44)], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multishape(shape):
run_reduce_scatter_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_reduce_scatter_sum_multidtype(dtype):
run_reduce_scatter_sum((8, 10), dtype)
def run_all_reduce_sum(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_sum(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = x + y
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multishape(shape):
run_all_reduce_sum(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_sum_multidtype(dtype):
run_all_reduce_sum((8, 10), dtype)
def run_all_reduce_max(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_max(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.maximum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multishape(shape):
run_all_reduce_max(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_max_multidtype(dtype):
run_all_reduce_max((8, 10), dtype)
def run_all_reduce_min(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_reduce_min(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.minimum(x, y)
data = (x, y)
expect = (z, z)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(), (1,), (2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multishape(shape):
run_all_reduce_min(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_reduce_min_multidtype(dtype):
run_all_reduce_min((8, 10), dtype)
def run_gather(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = gather(inp)
if rank == 0:
assert np.allclose(output.numpy(), expect[rank])
else:
assert output is None
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
z = np.concatenate((x, y))
data = (x, y)
expect = (z, None)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (99, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multishape(shape):
run_gather(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_gather_multidtype(dtype):
run_gather((8, 10), dtype)
def run_scatter(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = scatter(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = x + 1
data = (x, y)
expect = (x[: shape[0] // 2], x[shape[0] // 2 :])
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multishape(shape):
run_scatter(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_scatter_multidtype(dtype):
run_scatter((8, 10), dtype)
def run_all_to_all(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(data, expect):
rank = dist.get_rank()
inp = tensor(data[rank])
output = all_to_all(inp)
assert np.allclose(output.numpy(), expect[rank])
x = np.random.random_sample(shape).astype(dtype)
y = np.random.random_sample(shape).astype(dtype)
a = np.concatenate((x[: shape[0] // 2], y[: shape[0] // 2]))
b = np.concatenate((x[shape[0] // 2 :], y[shape[0] // 2 :]))
data = (x, y)
expect = (a, b)
worker(data, expect)
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("shape", [(2, 3), (8, 10), (100, 77)], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multishape(shape):
run_all_to_all(shape, "float32")
@pytest.mark.require_ngpu(2)
@pytest.mark.parametrize("dtype", ["float32", "int32", "int8", "uint8"], ids=str)
@pytest.mark.isolated_distributed
def test_all_to_all_multidtype(dtype):
run_all_to_all((8, 10), dtype)
def run_io_remote(shape, dtype):
@dist.launcher(n_gpus=2)
def worker(val, shape):
rank = dist.get_rank()
if rank == 0: # remote send
x = tensor(val, device="xpu0")
remote_send(x, 1)
sync()
else: # remote recv
y = remote_recv(0)
assert y.device == | get_default_device() | megengine.device.get_default_device |
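The row above ends at the point-to-point pair: rank 0 pushes a tensor with remote_send and rank 1 reads it back with remote_recv. A minimal sketch of that pattern, reusing the same two-rank launcher, is added here for reference; the demo function name and the value check are illustrative assumptions, not the dataset's recorded completion.
# Illustrative sketch only -- assumes the imports already shown in the row above.
@dist.launcher(n_gpus=2)
def _send_recv_demo(val):
    rank = dist.get_rank()
    if rank == 0:  # sender
        x = tensor(val, device="xpu0")
        remote_send(x, 1)   # ship the tensor to rank 1
        sync()              # flush the send before the worker exits
    else:  # receiver
        y = remote_recv(0)  # blocking receive from rank 0
        np.testing.assert_allclose(y.numpy(), val)
_send_recv_demo(np.random.random_sample((8, 10)).astype("float32"))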
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = | F.reshape(b, new_shape) | megengine.functional.reshape |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = F.tanh(x)
elif act == 'sigmoid':
x = F.sigmoid(x)
elif act == 'elu':
x = F.elu(x)
elif act == 'selu':
x = F.selu(x)
elif act == 'softplus':
x = F.softplus(x)
elif act == 'swish':
x = F.sigmoid(x) * x
else:
raise NotImplementedError("activation \'{}\' is not implemented.".format(act))
act_x = x
    # multiply by the gain factor
gain = float(gain)
if gain != 1:
x = x * gain
gain_x = x
    # clamp the value range
if clamp >= 0:
x = | F.clip(x, -clamp, clamp) | megengine.functional.clip |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = | F.relu(x) | megengine.functional.relu |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = F.tanh(x)
elif act == 'sigmoid':
x = F.sigmoid(x)
elif act == 'elu':
x = F.elu(x)
elif act == 'selu':
x = F.selu(x)
elif act == 'softplus':
x = F.softplus(x)
elif act == 'swish':
x = F.sigmoid(x) * x
else:
raise NotImplementedError("activation \'{}\' is not implemented.".format(act))
act_x = x
    # multiply by the gain factor
gain = float(gain)
if gain != 1:
x = x * gain
gain_x = x
    # clamp the value range
if clamp >= 0:
x = F.clip(x, -clamp, clamp)
clamp_x = x
return clamp_x
class FullyConnectedLayer(M.Module):
def __init__(self,
in_features, # Number of input features.
out_features, # Number of output features.
bias = True, # Apply additive bias before the activation function?
activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
lr_multiplier = 1, # Learning rate multiplier.
bias_init = 0, # Initial value for the additive bias.
):
super().__init__()
self.activation = activation
self.weight = mge.Parameter(mge.tensor(np.random.randn(out_features, in_features).astype(np.float32)) / lr_multiplier)
self.bias = mge.Parameter(mge.tensor(np.ones(out_features, ).astype(np.float32) * bias_init)) if bias else None
self.weight_gain = lr_multiplier / np.sqrt(in_features)
self.weight_gain = float(self.weight_gain)
self.bias_gain = lr_multiplier
def forward(self, x):
# w = self.weight.to(x.dtype) * self.weight_gain
w = self.weight * self.weight_gain
b = self.bias
if b is not None:
# b = b.to(x.dtype)
if self.bias_gain != 1:
b = b * self.bias_gain
if self.activation == 'linear' and b is not None:
out = F.matmul(x, w, transpose_b=True) + F.expand_dims(b, 0)
else:
# r = x.matmul(w.t())
r = | F.matmul(x, w, transpose_b=True) | megengine.functional.matmul |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = | F.leaky_relu(x, alpha) | megengine.functional.leaky_relu |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = F.tanh(x)
elif act == 'sigmoid':
x = F.sigmoid(x)
elif act == 'elu':
x = F.elu(x)
elif act == 'selu':
x = F.selu(x)
elif act == 'softplus':
x = F.softplus(x)
elif act == 'swish':
x = F.sigmoid(x) * x
else:
raise NotImplementedError("activation \'{}\' is not implemented.".format(act))
act_x = x
    # multiply by the gain factor
gain = float(gain)
if gain != 1:
x = x * gain
gain_x = x
    # clamp the value range
if clamp >= 0:
x = F.clip(x, -clamp, clamp)
clamp_x = x
return clamp_x
class FullyConnectedLayer(M.Module):
def __init__(self,
in_features, # Number of input features.
out_features, # Number of output features.
bias = True, # Apply additive bias before the activation function?
activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
lr_multiplier = 1, # Learning rate multiplier.
bias_init = 0, # Initial value for the additive bias.
):
super().__init__()
self.activation = activation
self.weight = mge.Parameter(mge.tensor(np.random.randn(out_features, in_features).astype(np.float32)) / lr_multiplier)
self.bias = mge.Parameter(mge.tensor(np.ones(out_features, ).astype(np.float32) * bias_init)) if bias else None
self.weight_gain = lr_multiplier / np.sqrt(in_features)
self.weight_gain = float(self.weight_gain)
self.bias_gain = lr_multiplier
def forward(self, x):
# w = self.weight.to(x.dtype) * self.weight_gain
w = self.weight * self.weight_gain
b = self.bias
if b is not None:
# b = b.to(x.dtype)
if self.bias_gain != 1:
b = b * self.bias_gain
if self.activation == 'linear' and b is not None:
out = | F.matmul(x, w, transpose_b=True) | megengine.functional.matmul |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = F.tanh(x)
elif act == 'sigmoid':
x = F.sigmoid(x)
elif act == 'elu':
x = F.elu(x)
elif act == 'selu':
x = F.selu(x)
elif act == 'softplus':
x = F.softplus(x)
elif act == 'swish':
x = F.sigmoid(x) * x
else:
raise NotImplementedError("activation \'{}\' is not implemented.".format(act))
act_x = x
    # multiply by the gain factor
gain = float(gain)
if gain != 1:
x = x * gain
gain_x = x
    # clamp the value range
if clamp >= 0:
x = F.clip(x, -clamp, clamp)
clamp_x = x
return clamp_x
class FullyConnectedLayer(M.Module):
def __init__(self,
in_features, # Number of input features.
out_features, # Number of output features.
bias = True, # Apply additive bias before the activation function?
activation = 'linear', # Activation function: 'relu', 'lrelu', etc.
lr_multiplier = 1, # Learning rate multiplier.
bias_init = 0, # Initial value for the additive bias.
):
super().__init__()
self.activation = activation
self.weight = mge.Parameter(mge.tensor(np.random.randn(out_features, in_features).astype(np.float32)) / lr_multiplier)
self.bias = mge.Parameter(mge.tensor(np.ones(out_features, ).astype(np.float32) * bias_init)) if bias else None
self.weight_gain = lr_multiplier / np.sqrt(in_features)
self.weight_gain = float(self.weight_gain)
self.bias_gain = lr_multiplier
def forward(self, x):
# w = self.weight.to(x.dtype) * self.weight_gain
w = self.weight * self.weight_gain
b = self.bias
if b is not None:
# b = b.to(x.dtype)
if self.bias_gain != 1:
b = b * self.bias_gain
if self.activation == 'linear' and b is not None:
out = F.matmul(x, w, transpose_b=True) + | F.expand_dims(b, 0) | megengine.functional.expand_dims |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = | F.tanh(x) | megengine.functional.tanh |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = F.tanh(x)
elif act == 'sigmoid':
x = | F.sigmoid(x) | megengine.functional.sigmoid |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = F.tanh(x)
elif act == 'sigmoid':
x = F.sigmoid(x)
elif act == 'elu':
x = | F.elu(x) | megengine.functional.elu |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = F.tanh(x)
elif act == 'sigmoid':
x = F.sigmoid(x)
elif act == 'elu':
x = F.elu(x)
elif act == 'selu':
x = | F.selu(x) | megengine.functional.selu |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = F.tanh(x)
elif act == 'sigmoid':
x = F.sigmoid(x)
elif act == 'elu':
x = F.elu(x)
elif act == 'selu':
x = F.selu(x)
elif act == 'softplus':
x = | F.softplus(x) | megengine.functional.softplus |
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert clamp is None or clamp >= 0
def_gain = 1.0
    if act in ['relu', 'lrelu', 'swish']: # these activations use def_gain = np.sqrt(2); all other activations keep def_gain = 1.0
        def_gain = np.sqrt(2)
    def_alpha = 0.0
    if act in ['lrelu']: # these activations use def_alpha = 0.2; all other activations keep def_alpha = 0.0
def_alpha = 0.2
alpha = float(alpha if alpha is not None else def_alpha)
gain = float(gain if gain is not None else def_gain)
clamp = float(clamp if clamp is not None else -1)
    # add the bias
if b is not None:
new_shape = [-1 if i == dim else 1 for i in range(x.ndim)]
b_ = F.reshape(b, new_shape)
x = x + b_
x_add_b = x
    # apply the activation function
    alpha = float(alpha) # only needed by leaky_relu
if act == 'linear':
pass
elif act == 'relu':
x = F.relu(x)
elif act == 'lrelu':
x = F.leaky_relu(x, alpha)
elif act == 'tanh':
x = F.tanh(x)
elif act == 'sigmoid':
x = F.sigmoid(x)
elif act == 'elu':
x = F.elu(x)
elif act == 'selu':
x = F.selu(x)
elif act == 'softplus':
x = F.softplus(x)
elif act == 'swish':
x = | F.sigmoid(x) | megengine.functional.sigmoid |
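The bias_act rows above all sample the same fused bias-plus-activation helper (and the FullyConnectedLayer that wraps it) at successive completion points. A short usage sketch follows; the shapes and the lrelu/clamp settings are arbitrary illustrative choices, and it assumes the parts of the forward pass cut off at the truncation points return the activated output as usual.
# Illustrative sketch only -- assumes numpy and megengine are imported as in the rows above.
x = mge.tensor(np.random.randn(4, 512).astype(np.float32))
b = mge.tensor(np.zeros(512, dtype=np.float32))
y = bias_act(x, b, dim=1, act='lrelu', clamp=256)  # bias + leaky relu + sqrt(2) gain, clipped to [-256, 256]
fc = FullyConnectedLayer(512, 128, activation='linear')
z = fc(x)  # plain affine layer, output shape (4, 128)
print(y.shape, z.shape)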
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = | mge.get_logger(__name__) | megengine.get_logger |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = | data.dataset.ImageNet(args.data, train=False) | megengine.data.dataset.ImageNet |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
| dist.helper.get_device_count_by_fork("gpu") | megengine.distributed.helper.get_device_count_by_fork |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = | dist.launcher(worker) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
| quantize_qat(model, qconfig=Q.ema_fakequant_qconfig) | megengine.quantization.quantize.quantize_qat |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = | mge.load(args.checkpoint) | megengine.load |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
| quantize(model) | megengine.quantization.quantize.quantize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = | F.loss.cross_entropy(logits, label, label_smooth=0.1) | megengine.functional.loss.cross_entropy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = | F.topk_accuracy(logits, label, (1, 5)) | megengine.functional.topk_accuracy |
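# --- Hedged sketch: the row above targets megengine.functional.topk_accuracy. On random
# data (shapes are assumptions for the demo) it returns the top-1 and top-5 accuracies
# as tensors in [0, 1]; the script later multiplies them by 100 for reporting.
import numpy as np
import megengine as mge
import megengine.functional as F

logits = mge.tensor(np.random.randn(8, 100).astype("float32"))
labels = mge.tensor(np.random.randint(0, 100, size=(8,)).astype("int32"))
acc1, acc5 = F.topk_accuracy(logits, labels, (1, 5))
print(acc1.numpy(), acc5.numpy())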
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if | dist.is_distributed() | megengine.distributed.is_distributed |
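# --- Hedged sketch: the row above targets megengine.distributed.is_distributed. Outside
# of dist.launcher the process belongs to no group, so valid_func skips its all-reduce
# branch; the defaults printed below are what a plain single-process run reports.
import megengine.distributed as dist

print(dist.is_distributed())  # False outside a launched worker
print(dist.get_rank())        # 0 by default
print(dist.get_world_size())  # 1 by default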
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
_, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
if rank == 0:
logger.info("TEST %f, %f", valid_acc, valid_acc5)
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = | mge.tensor(image, dtype="float32") | megengine.tensor |
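# --- Hedged sketch: the row above targets megengine.tensor. The DataLoader yields numpy
# arrays, so infer() wraps each minibatch into tensors with explicit dtypes; the
# 100x3x224x224 batch shape below mirrors the batch_size and CHW transform of this
# script and is otherwise an assumption for the demo.
import numpy as np
import megengine as mge

image_np = np.zeros((100, 3, 224, 224), dtype=np.uint8)
label_np = np.zeros((100,), dtype=np.int64)
image = mge.tensor(image_np, dtype="float32")  # cast to float32 for the network input
label = mge.tensor(label_np, dtype="int32")    # int32 class indices for cross_entropy
print(image.shape, image.dtype, label.dtype)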
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
_, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
if rank == 0:
logger.info("TEST %f, %f", valid_acc, valid_acc5)
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = mge.tensor(image, dtype="float32")
label = | mge.tensor(label, dtype="int32") | megengine.tensor |
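# --- Hedged sketch: infer() above relies on an AverageMeter helper that is neither
# defined nor imported in this snippet. A minimal implementation consistent with how it
# is called (AverageMeter("Loss"); meter.update(value, n)) could look like this; the
# .sum/.count/.avg names are assumptions, not taken from the original file.
class AverageMeter:
    """Tracks the running average of a scalar metric."""

    def __init__(self, name=""):
        self.name = name
        self.reset()

    def reset(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        self.sum += float(value) * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)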
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = | dist.functional.all_reduce_sum(loss) | megengine.distributed.functional.all_reduce_sum |
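# --- Hedged sketch: the row above targets megengine.distributed.functional.all_reduce_sum.
# valid_func averages loss/acc1/acc5 across workers by summing over all ranks and dividing
# by the world size; the helper below wraps that pattern, with an is_distributed() guard
# so it is also safe to call in a single-process run.
import megengine as mge
import megengine.distributed as dist

def all_reduce_mean(tensor):
    """Average `tensor` across all workers; identity when not running distributed."""
    if dist.is_distributed():
        tensor = dist.functional.all_reduce_sum(tensor) / dist.get_world_size()
    return tensor

print(all_reduce_mean(mge.tensor([1.0, 2.0])).numpy())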
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = | dist.functional.all_reduce_sum(acc1) | megengine.distributed.functional.all_reduce_sum |
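# --- Hedged sketch: besides the all-reduce call targeted above, worker() exercises
# MegEngine's quantization path (quantize_qat with Q.ema_fakequant_qconfig, then
# quantize). A toy module stands in for the ResNet from `models`, which this snippet
# does not ship; the conversion calls themselves follow worker() above.
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat

class TinyNet(M.Module):
    def __init__(self):
        super().__init__()
        self.fc = M.Linear(16, 4)

    def forward(self, x):
        return self.fc(x)

net = TinyNet()
quantize_qat(net, qconfig=Q.ema_fakequant_qconfig)  # swap in QAT modules (fake-quant, fp32 math)
# a few forward passes let the EMA observers record ranges before conversion
for _ in range(4):
    net(mge.tensor(np.random.randn(2, 16).astype("float32")))
quantize(net)  # convert the QAT model to a real int8 quantized model for inference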
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = | dist.functional.all_reduce_sum(acc5) | megengine.distributed.functional.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
_, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
if rank == 0:
logger.info("TEST %f, %f", valid_acc, valid_acc5)
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = mge.tensor(image, dtype="float32")
label = mge.tensor(label, dtype="int32")
loss, acc1, acc5 = model(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
if step % args.report_freq == 0 and | dist.get_rank() | megengine.distributed.get_rank |
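# --- Hedged sketch: the row above targets megengine.distributed.get_rank. infer() gates
# its progress logging on rank 0 so only one worker prints; the same guard in isolation
# (the helper name is an assumption for the demo):
import megengine.distributed as dist

def log_on_rank0(message):
    """Print only from the rank-0 worker; rank is 0 in a single-process run."""
    if dist.get_rank() == 0:
        print(message)

log_on_rank0("Step 0: Acc@1 ...")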
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[ | T.Resize(256) | megengine.data.transform.Resize |
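# --- Hedged sketch: the validation input pipeline above, shrunk to an in-memory
# ArrayDataset so it runs without the ImageNet directory. The toy shapes are assumptions
# for the demo; the SequentialSampler/DataLoader wiring mirrors the script.
import numpy as np
import megengine.data as data

images = np.zeros((10, 3, 8, 8), dtype="float32")
labels = np.arange(10, dtype="int32")
toy_dataset = data.dataset.ArrayDataset(images, labels)
toy_sampler = data.SequentialSampler(toy_dataset, batch_size=4, drop_last=False)
toy_queue = data.DataLoader(toy_dataset, sampler=toy_sampler)

for batch_image, batch_label in toy_queue:
    print(batch_image.shape, batch_label)  # batches of 4, last batch holds the remaining 2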
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), | T.CenterCrop(224) | megengine.data.transform.CenterCrop |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument(
"-m",
"--mode",
default="qat",
type=str,
choices=["normal", "qat", "quantized"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"qat: quantization aware training, simulate int8\n"
"quantized: convert mode to int8 quantized, inference only",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
test_proc = dist.launcher(worker) if world_size > 1 else worker
test_proc(world_size, args)
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
model = models.__dict__[args.arch]()
if args.mode != "normal":
quantize_qat(model, qconfig=Q.ema_fakequant_qconfig)
if args.checkpoint:
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
if args.mode == "quantized":
quantize(model)
# Define valid graph
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build valid datasets
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), | T.Normalize(mean=128) | megengine.data.transform.Normalize |
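# --- Hedged sketch: the evaluation transform built above with megengine.data.transform.
# Compose chains Resize(256) -> CenterCrop(224) -> Normalize(mean=128) -> ToMode("CHW"),
# exactly as passed to the DataLoader. Applying it to one fake image is shown below; the
# single-sample .apply() call and the tuple handling are assumptions about the
# VisionTransform interface, not taken from this snippet.
import numpy as np
import megengine.data.transform as T

eval_transform = T.Compose(
    [T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
)
img = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # fake HWC image
out = eval_transform.apply((img,))
out = out[0] if isinstance(out, tuple) else out  # unwrap if the transform returns a tuple
print(out.shape)  # expected (3, 224, 224): a 224x224 center crop in CHW layout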