# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np

import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M

import official.vision.classification.resnet.model as resnet


class DeconvLayers(M.Module):
    """A stack of stride-2 transposed convolutions, each followed by norm + ReLU."""

    def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
        super(DeconvLayers, self).__init__()
        _body = []
        for i in range(num_layers):
            kernel = kernels[i]
            # padding=0 when kernel=2; padding=1 when kernel=3 or kernel=4
            padding = kernel // 3
            _body += [
                M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
                norm(nf2s[i]),
                M.ReLU(),
            ]
            nf1 = nf2s[i]
        self.body = M.Sequential(*_body)

    def forward(self, x):
        return self.body(x)


class SimpleBaseline(M.Module):
    """Simple Baseline keypoint network: ResNet backbone + deconv upsampling + 1 conv."""

    def __init__(self, backbone, cfg):
        super(SimpleBaseline, self).__init__()
        norm = M.BatchNorm2d
        self.backbone = getattr(resnet, backbone)(
            norm=norm, pretrained=cfg.backbone_pretrained
        )
        del self.backbone.fc  # the classification head is not needed

        self.cfg = cfg

        self.deconv_layers = DeconvLayers(
            cfg.initial_deconv_channels,
            cfg.deconv_channels,
            cfg.deconv_kernel_sizes,
            cfg.num_deconv_layers,
            cfg.deconv_with_bias,
            norm,
        )
        self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)

        self._initialize_weights()

        self.inputs = {
            "image": mge.tensor(dtype="float32"),
            "heatmap": mge.tensor(dtype="float32"),
            "heat_valid": mge.tensor(dtype="float32"),
        }

    def forward(self, x):
        # NOTE: the forward pass is not included in this excerpt; this is a
        # plausible reconstruction that assumes the backbone exposes
        # extract_features() returning a dict of feature maps.
        f = self.backbone.extract_features(x)["res5"]
        f = self.deconv_layers(f)
        return self.last_layer(f)

    def calc_loss(self):
        out = self.forward(self.inputs["image"])
        # mask out keypoints that are not annotated (heat_valid has shape (N, K))
        valid = self.inputs["heat_valid"][:, :, None, None]
        label = self.inputs["heatmap"][:, -1]
        loss = F.square_loss(out * valid, label * valid)
        return loss

    def predict(self):
        return self.forward(self.inputs["image"])

    def _initialize_weights(self):
        for k, m in self.deconv_layers.named_modules():
            if isinstance(m, M.ConvTranspose2d):
                M.init.normal_(m.weight, std=0.001)
                if self.cfg.deconv_with_bias:
                    M.init.zeros_(m.bias)
            if isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
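# ---------------------------------------------------------------------------
# Why padding = kernel // 3 above: with stride 2, the standard transposed-
# convolution size formula  out = (in - 1) * stride - 2 * padding + kernel
# makes every deconv stage an exact (or off-by-one) 2x upsampler.
# A minimal sketch of that arithmetic (sizes are illustrative only):
def _deconv_out_size(in_size, kernel, stride=2):
    padding = kernel // 3  # same rule as DeconvLayers
    return (in_size - 1) * stride - 2 * padding + kernel

# _deconv_out_size(16, 2) -> 32, _deconv_out_size(16, 3) -> 31,
# _deconv_out_size(16, 4) -> 32: roughly doubling the spatial size per stage.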
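# ---------------------------------------------------------------------------
# calc_loss masks both prediction and label with heat_valid before the squared
# loss, so unannotated keypoints contribute nothing. heat_valid has shape
# (N, K), and [:, :, None, None] broadcasts it over the (H, W) heatmap axes.
# A quick numpy illustration (the shapes below are assumptions):
_N, _K, _H, _W = 2, 17, 64, 48              # batch, keypoints, heatmap size
_out = np.random.rand(_N, _K, _H, _W)
_valid = np.ones((_N, _K), dtype="float32")
_valid[0, 3] = 0.0                          # keypoint 3 of sample 0 unlabeled
_masked = _out * _valid[:, :, None, None]   # (N, K, 1, 1) broadcasts over H, W
assert _masked[0, 3].sum() == 0.0           # that channel drops out of the loss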
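# ---------------------------------------------------------------------------
# A hypothetical construction sketch. The cfg field names mirror exactly what
# SimpleBaseline reads; the values below are illustrative assumptions, not
# the project's actual configuration.
from types import SimpleNamespace

_cfg = SimpleNamespace(
    backbone_pretrained=False,
    initial_deconv_channels=2048,    # e.g. channels of resnet50's last stage
    num_deconv_layers=3,
    deconv_channels=[256, 256, 256],
    deconv_kernel_sizes=[4, 4, 4],
    deconv_with_bias=False,
    keypoint_num=17,                 # e.g. COCO keypoints
)
_model = SimpleBaseline("resnet50", _cfg)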
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time

import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from tensorboardX import SummaryWriter

import shufflenet_v2 as M

logger = mge.get_logger(__name__)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="./models", type=str)
    parser.add_argument("-m", "--model", default=None, type=str)
    parser.add_argument(
        "-o", "--output", type=str, required=True,
        help="set path for checkpoints w/ tensorboard",
    )
    parser.add_argument("-b", "--batch-size", default=128, type=int)
    parser.add_argument("--learning-rate", default=0.0625, type=float)
    parser.add_argument("--momentum", default=0.9, type=float)
    parser.add_argument("--weight-decay", default=4e-5, type=float)
    parser.add_argument("--steps", default=300000, type=int)
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus

    save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    mge.set_log_file(os.path.join(save_dir, "log.txt"))

    if not os.path.exists(args.output):
        os.makedirs(args.output)

    if world_size > 1:
        # scale learning rate by number of gpus
        args.learning_rate *= world_size
        # start distributed training, dispatch sub-processes
        mp.set_start_method("spawn")
        processes = []
        for rank in range(world_size):
            p = mp.Process(target=worker, args=(rank, world_size, args))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
    else:
        worker(0, 1, args)


def get_parameters(model):
    # Split parameters into two groups: multi-dimensional "weight" tensors get
    # weight decay; everything else (biases, norm affine params) does not.
    group_no_weight_decay = []
    group_weight_decay = []
    for pname, p in model.named_parameters(requires_grad=True):
        if pname.find("weight") >= 0 and len(p.shape) > 1:
            group_weight_decay.append(p)
        else:
            group_no_weight_decay.append(p)
    assert len(list(model.parameters())) == len(group_weight_decay) + len(
        group_no_weight_decay
    )
    groups = [
        dict(params=group_weight_decay),
        dict(params=group_no_weight_decay, weight_decay=0.0),
    ]
    return groups


def worker(rank, world_size, args):
    # pylint: disable=too-many-statements
    mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))

    if world_size > 1:
        # Initialize distributed process group
        logger.info("init distributed process group {} / {}".format(rank, world_size))
        dist.init_process_group(
            master_ip="localhost",
            master_port=23456,
            world_size=world_size,
            rank=rank,
            dev=rank,
        )

    save_dir = os.path.join(args.save, args.arch)

    if rank == 0:
        prefixes = ["train", "valid"]
        writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixes}

    model = getattr(M, args.arch)()

    step_start = 0
    if args.model:
        logger.info("load weights from %s", args.model)
        model.load_state_dict(mge.load(args.model))
        step_start = int(args.model.split("-")[1].split(".")[0])

    optimizer = optim.SGD(
        get_parameters(model),
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
    )

    # Define train and valid graph
    @jit.trace(symbolic=True)
    def train_func(image, label):
        model.train()
        logits = model(image)
        loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
        acc1, acc5 = F.accuracy(logits, label, (1, 5))
        optimizer.backward(loss)  # compute gradients
        if dist.is_distributed():  # all_reduce_mean
            loss = dist.all_reduce_sum(loss) / dist.get_world_size()
            acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
            acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
        return loss, acc1, acc5

    @jit.trace(symbolic=True)
    def valid_func(image, label):
        model.eval()
        logits = model(image)
        loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
        acc1, acc5 = F.accuracy(logits, label, (1, 5))
        if dist.is_distributed():  # all_reduce_mean
            loss = dist.all_reduce_sum(loss) / dist.get_world_size()
            acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
            acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
        return loss, acc1, acc5

    # Build train and valid datasets
    logger.info("preparing dataset..")
    train_dataset = data.dataset.ImageNet(args.data, train=True)
    train_sampler = data.Infinite(data.RandomSampler(
        train_dataset, batch_size=args.batch_size, drop_last=True
    ))
    train_queue = data.DataLoader(
        train_dataset,
        sampler=train_sampler,
        transform=T.Compose(
            [
                T.RandomResizedCrop(224),
                T.RandomHorizontalFlip(),
                T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                T.ToMode("CHW"),
            ]
        ),
        num_workers=args.workers,
    )
    train_queue = iter(train_queue)
    valid_dataset = data.dataset.ImageNet(args.data, train=False)
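# ---------------------------------------------------------------------------
# The loss/accuracy averaging inside train_func and valid_func is the usual
# all-reduce-mean idiom: an all-reduce SUM followed by division by the world
# size. Factored out, it would look like this sketch (same MegEngine
# distributed API the script already imports):
def _all_reduce_mean(x):
    # uses the module-level `megengine.distributed as dist` import above
    if dist.is_distributed():
        x = dist.all_reduce_sum(x) / dist.get_world_size()
    return x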
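# ---------------------------------------------------------------------------
# The resume logic in worker() recovers the step count from the checkpoint
# filename: split("-")[1].split(".")[0] implies names shaped like
# <prefix>-<step>.<ext>. The concrete naming scheme is not shown in this
# excerpt, so the example name below is an assumption:
_model_path = "checkpoint-150000.pkl"    # assumed form: <prefix>-<step>.<ext>
_step_start = int(_model_path.split("-")[1].split(".")[0])
assert _step_start == 150000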
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints \w tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = | mge.get_device_count("gpu") | megengine.get_device_count |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints \w tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = | F.cross_entropy_with_softmax(logits, label, label_smooth=0.1) | megengine.functional.cross_entropy_with_softmax |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints \w tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = | F.accuracy(logits, label, (1, 5)) | megengine.functional.accuracy |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints \w tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if | dist.is_distributed() | megengine.distributed.is_distributed |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
    parser.add_argument("-o", "--output", type=str, required=True, help="set path for checkpoints with tensorboard logs")
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
    # Split parameters into two groups: multi-dimensional "weight"
    # parameters (conv/linear kernels) keep weight decay, while biases
    # and norm parameters are excluded from it.
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
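To illustrate the split, a hedged usage sketch of get_parameters on a throwaway module (the tiny network below is hypothetical, not part of this script): only the multi-dimensional conv kernel should land in the decayed group, while the conv bias and the BatchNorm affine parameters land in the no-decay group.

import megengine.module as nn

tiny = nn.Sequential(nn.Conv2d(3, 4, 3), nn.BatchNorm2d(4))
groups = get_parameters(tiny)
# Expect 1 decayed parameter (the 4-D conv kernel) and 3 undecayed ones
# (the conv bias plus the 1-D BatchNorm weight and bias).
print(len(groups[0]["params"]), len(groups[1]["params"]))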
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
        prefixes = ["train", "valid"]
        writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixes}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
        step_start = int(args.model.split("-")[1].split(".")[0])  # expects a checkpoint named like "<prefix>-<step>.pkl"
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = | F.cross_entropy_with_softmax(logits, label, label_smooth=0.1) | megengine.functional.cross_entropy_with_softmax |
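The completion above is MegEngine 0.x's fused softmax plus cross-entropy. A small self-contained sketch of the call on dummy data (shapes and class count are arbitrary): label_smooth=0.1 blends 0.9 of the one-hot target with a uniform 0.1 mass over the classes before taking the cross-entropy.

import numpy as np
import megengine as mge
import megengine.functional as F

logits = mge.tensor(np.random.randn(4, 10).astype("float32"))
labels = mge.tensor(np.random.randint(10, size=(4,)).astype("int32"))
# Same call as in train_func/valid_func, on toy data.
loss = F.cross_entropy_with_softmax(logits, labels, label_smooth=0.1)
print(loss.numpy())  # scalar loss over the batch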
acc1, acc5 = | F.accuracy(logits, label, (1, 5)) | megengine.functional.accuracy |
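Likewise, a hedged sketch of the masked F.accuracy call (same 0.x API, toy shapes): passing the tuple (1, 5) asks for top-1 and top-5 accuracy in one call, which is what train_func and valid_func unpack into acc1 and acc5.

import numpy as np
import megengine as mge
import megengine.functional as F

logits = mge.tensor(np.random.randn(8, 100).astype("float32"))
labels = mge.tensor(np.random.randint(100, size=(8,)).astype("int32"))
acc1, acc5 = F.accuracy(logits, labels, (1, 5))  # top-1 and top-5 rates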
if | dist.is_distributed() | megengine.distributed.is_distributed |
model.load_state_dict( | mge.load(args.model) | megengine.load |
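This row masks the checkpoint-loading call. A hedged round-trip sketch with mge.save/mge.load on a stand-in module (the Linear layer and the file name "ckpt-100000.pkl" are hypothetical; the name only mirrors the "<prefix>-<step>.pkl" pattern that step_start is parsed from):

import megengine as mge
import megengine.module as nn

net = nn.Linear(4, 2)                          # stand-in for the real model
path = "ckpt-100000.pkl"                       # hypothetical checkpoint name
mge.save(net.state_dict(), path)
net.load_state_dict(mge.load(path))            # the masked call above
step = int(path.split("-")[1].split(".")[0])   # -> 100000, as step_start does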
loss = | dist.all_reduce_sum(loss) | megengine.distributed.all_reduce_sum |
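The next few rows mask the pieces of the all-reduce averaging. As a sketch under the same API: dist.all_reduce_sum returns the element-wise sum of its argument across all ranks to every rank, so each worker ends up holding the same global value.

import numpy as np
import megengine as mge
import megengine.distributed as dist

t = mge.tensor(np.array([1.0], dtype="float32"))
if dist.is_distributed():
    t = dist.all_reduce_sum(t)  # every rank now holds the sum over ranks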
loss = dist.all_reduce_sum(loss) / | dist.get_world_size() | megengine.distributed.get_world_size |
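Dividing that sum by dist.get_world_size() turns it into a mean, which is what the "# all_reduce_mean" comment in train_func refers to. The pattern could be factored into a small helper, sketched here (the helper name is my own, not from this script):

import megengine.distributed as dist

def all_reduce_mean(x):
    # Average a per-rank scalar/tensor across all ranks; a no-op when
    # running without an initialized process group.
    if dist.is_distributed():
        x = dist.all_reduce_sum(x) / dist.get_world_size()
    return x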
acc1 = | dist.all_reduce_sum(acc1) | megengine.distributed.all_reduce_sum |
acc1 = dist.all_reduce_sum(acc1) / | dist.get_world_size() | megengine.distributed.get_world_size |
acc5 = | dist.all_reduce_sum(acc5) | megengine.distributed.all_reduce_sum |
acc5 = dist.all_reduce_sum(acc5) / | dist.get_world_size() | megengine.distributed.get_world_size |
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = | dist.all_reduce_sum(loss) | megengine.distributed.all_reduce_sum |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints \w tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints \w tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = | dist.all_reduce_sum(acc1) | megengine.distributed.all_reduce_sum |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints \w tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints \w tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = | dist.all_reduce_sum(acc5) | megengine.distributed.all_reduce_sum |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints \w tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
    parser.add_argument("-o", "--output", type=str, required=True, help="set output path for checkpoints and tensorboard logs")
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
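    # Split parameters into two groups: multi-dimensional "weight" tensors get
    # weight decay, while biases and 1-D tensors (e.g. BatchNorm scale/shift)
    # are exempted, a common recipe for ShuffleNet-style training.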
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
        prefixes = ["train", "valid"]
        writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixes}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
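    # jit.trace(symbolic=True) compiles each function into a static graph on its
    # first call; optimizer.backward(loss) inside the traced body computes and
    # accumulates gradients for the parameters registered with the optimizer.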
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build train and valid datasets
logger.info("preparing dataset..")
train_dataset = data.dataset.ImageNet(args.data, train=True)
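    # data.Infinite wraps the sampler so the loader can be drained step-by-step
    # with next(train_queue) below, instead of iterating in epochs.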
train_sampler = data.Infinite(data.RandomSampler(
train_dataset, batch_size=args.batch_size, drop_last=True
))
train_queue = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
train_queue = iter(train_queue)
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
# Start training
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step in range(step_start, args.steps + 1):
# Linear learning rate decay
        decay = 1 - float(step) / args.steps if step < args.steps else 0
for param_group in optimizer.param_groups:
param_group["lr"] = args.learning_rate * decay
image, label = next(train_queue)
        time_data = time.time() - t
image = image.astype("float32")
label = label.astype("int32")
n = image.shape[0]
optimizer.zero_grad()
loss, acc1, acc5 = train_func(image, label)
optimizer.step()
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
objs.update(loss.numpy()[0], n)
total_time.update(time.time() - t)
        time_iter = time.time() - t
t = time.time()
if step % args.report_freq == 0 and rank == 0:
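            # AverageMeter.__str__ is expected to print "name value"; splitting
            # on whitespace and taking index 1 recovers the running average.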
logger.info(
"TRAIN Iter %06d: lr = %f,\tloss = %f,\twc_loss = 1,\tTop-1 err = %f,\tTop-5 err = %f,\tdata_time = %f,\ttrain_time = %f,\tremain_hours=%f",
step,
args.learning_rate * decay,
float(objs.__str__().split()[1]),
1-float(top1.__str__().split()[1])/100,
1-float(top5.__str__().split()[1])/100,
time_data,
time_iter - time_data,
time_iter * (args.steps - step) / 3600,
)
writers['train'].add_scalar('loss', float(objs.__str__().split()[1]), global_step=step)
writers['train'].add_scalar('top1_err', 1-float(top1.__str__().split()[1])/100, global_step=step)
writers['train'].add_scalar('top5_err', 1-float(top5.__str__().split()[1])/100, global_step=step)
objs.reset()
top1.reset()
top5.reset()
total_time.reset()
if step % 10000 == 0 and rank == 0 and step != 0:
logger.info("SAVING %06d", step)
mge.save(
model.state_dict(),
os.path.join(save_dir, "checkpoint-{:06d}.pkl".format(step)),
)
if step % 10000 == 0 and step != 0:
loss, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
logger.info("TEST Iter %06d: loss = %f,\tTop-1 err = %f,\tTop-5 err = %f", step, loss, 1-valid_acc/100, 1-valid_acc5/100)
if rank == 0:
writers['valid'].add_scalar('loss', loss, global_step=step)
writers['valid'].add_scalar('top1_err', 1-valid_acc/100, global_step=step)
writers['valid'].add_scalar('top5_err', 1-valid_acc5/100, global_step=step)
mge.save(
model.state_dict(), os.path.join(save_dir, "checkpoint-{:06d}.pkl".format(step))
)
loss, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
logger.info("TEST Iter %06d: loss=%f,\tTop-1 err = %f,\tTop-5 err = %f", step, _, 1-valid_acc/100, 1-valid_acc5/100)
def infer(valid_func, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = image.astype("float32") # convert np.uint8 to float32
label = label.astype("int32")
        loss, acc1, acc5 = valid_func(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
        if step % args.report_freq == 0 and dist.get_rank() == 0:
            logger.info("Step %d, %s %s %s %s", step, objs, top1, top5, total_time)
    # callers unpack this as (loss, top-1 acc, top-5 acc); assumes AverageMeter
    # exposes a readable `avg` (see the sketch below)
    return objs.avg, top1.avg, top5.avg
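
# NOTE: `AverageMeter` is referenced throughout this script but its definition
# is not part of this excerpt. The sketch below is an assumption, written only
# to be consistent with how the meter is used above: the constructor takes a
# display name, update(val, n) accumulates, `avg` is readable, reset() clears
# state, and __str__ prints "name value" so the logging code can parse it.
class AverageMeter:
    """Tracks and reports the running average of a scalar metric."""

    def __init__(self, name):
        self.name = name
        self.reset()

    def reset(self):
        self.sum = 0.0
        self.cnt = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt

    def __str__(self):
        return "{} {:.3f}".format(self.name, self.avg)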
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="./models", type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument('-o', '--output', type=str, required=True, help='set path for checkpoints \w tensorboard')
parser.add_argument("-b", "--batch-size", default=128, type=int)
parser.add_argument("--learning-rate", default=0.0625, type=float)
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--weight-decay", default=4e-5, type=float)
parser.add_argument("--steps", default=300000, type=int)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
if not os.path.exists(args.output):
os.makedirs(args.output)
if world_size > 1:
# scale learning rate by number of gpus
args.learning_rate *= world_size
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def get_parameters(model):
group_no_weight_decay = []
group_weight_decay = []
for pname, p in model.named_parameters(requires_grad=True):
if pname.find("weight") >= 0 and len(p.shape) > 1:
# print("include ", pname, p.shape)
group_weight_decay.append(p)
else:
# print("not include ", pname, p.shape)
group_no_weight_decay.append(p)
assert len(list(model.parameters())) == len(group_weight_decay) + len(
group_no_weight_decay
)
groups = [
dict(params=group_weight_decay),
dict(params=group_no_weight_decay, weight_decay=0.0),
]
return groups
def worker(rank, world_size, args):
# pylint: disable=too-many-statements
mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
save_dir = os.path.join(args.save, args.arch)
if rank == 0:
prefixs=['train', 'valid']
writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
model = getattr(M, args.arch)()
step_start = 0
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
step_start = int(args.model.split("-")[1].split(".")[0])
optimizer = optim.SGD(
get_parameters(model),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
# Define train and valid graph
@jit.trace(symbolic=True)
def train_func(image, label):
model.train()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
optimizer.backward(loss) # compute gradients
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
# Build train and valid datasets
logger.info("preparing dataset..")
train_dataset = data.dataset.ImageNet(args.data, train=True)
train_sampler = data.Infinite(data.RandomSampler(
train_dataset, batch_size=args.batch_size, drop_last=True
))
train_queue = data.DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
[
| T.RandomResizedCrop(224) | megengine.data.transform.RandomResizedCrop |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
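
# Naming convention in the tests below: "static" cases build and run graphs
# under @jit.trace(symbolic=True), "dynamic" cases execute ops eagerly, and
# "same_result" cases reseed via R.manual_seed before each draw.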
def test_random_static_diff_result():
    @jit.trace(symbolic=True)
    def graph_a():
        return R.uniform(5) + R.gaussian(5)

    @jit.trace(symbolic=True)
    def graph_b():
        return R.uniform(5) + R.gaussian(5)

    a = graph_a()
    b = graph_b()
    assert np.any(a.numpy() != b.numpy())


def test_random_static_same_result():
    @jit.trace(symbolic=True)
    def graph_a():
        R.manual_seed(731)
        return R.uniform(5) + R.gaussian(5)

    @jit.trace(symbolic=True)
    def graph_b():
        R.manual_seed(731)
        return R.uniform(5) + R.gaussian(5)

    a = graph_a()
    b = graph_b()
    assert np.all(a.numpy() == b.numpy())


def test_random_dynamic_diff_result():
    a = R.uniform(5) + R.gaussian(5)
    b = R.uniform(5) + R.gaussian(5)
    assert np.any(a.numpy() != b.numpy())


def test_random_dynamic_same_result():
    R.manual_seed(0)
    a = R.uniform(5) + R.gaussian(5)
    R.manual_seed(0)
    b = R.uniform(5) + R.gaussian(5)
    assert np.all(a.numpy() == b.numpy())


def test_dropout_dynamic_diff_result():
    x = mge.ones(10)
    a = F.dropout(x, 0.5)
    b = F.dropout(x, 0.5)
    assert np.any(a.numpy() != b.numpy())


def test_dropout_dynamic_same_result():
    x = mge.ones(10)
    R.manual_seed(0)
    a = F.dropout(x, 0.5)
    R.manual_seed(0)
    b = F.dropout(x, 0.5)
    assert np.all(a.numpy() == b.numpy())


def test_M_dropout_static_diff_result():
    m = M.Dropout(0.5)
    # The body below is a reconstruction (assumption): the source only shows the
    # first line of this test. Following the pattern of the "static" tests above,
    # two independently traced graphs should draw different dropout masks.
    @jit.trace(symbolic=True)
    def graph_a(x):
        return m(x)

    @jit.trace(symbolic=True)
    def graph_b(x):
        return m(x)

    x = mge.ones(10)
    a = graph_a(x)
    b = graph_b(x)
    assert np.any(a.numpy() != b.numpy())
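
# Note: these are plain pytest-style test functions; assuming pytest is
# available, the whole file can be run with `pytest -q` from its directory.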
x = mge.ones(10)
a = F.dropout(x, 0.5)
b = F.dropout(x, 0.5)
assert np.any(a.numpy() != b.numpy())
def test_dropout_dynamic_same_result():
x = mge.ones(10)
R.manual_seed(0)
a = F.dropout(x, 0.5)
R.manual_seed(0)
b = F.dropout(x, 0.5)
assert np.all(a.numpy() == b.numpy())
def test_M_dropout_static_diff_result():
m = M.Dropout(0.5)
@ | jit.trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + R.gaussian(5)
R.manual_seed(0)
b = R.uniform(5) + R.gaussian(5)
assert np.all(a.numpy() == b.numpy())
def test_dropout_dynamic_diff_result():
x = mge.ones(10)
a = F.dropout(x, 0.5)
b = F.dropout(x, 0.5)
assert np.any(a.numpy() != b.numpy())
def test_dropout_dynamic_same_result():
x = mge.ones(10)
R.manual_seed(0)
a = F.dropout(x, 0.5)
R.manual_seed(0)
b = F.dropout(x, 0.5)
assert np.all(a.numpy() == b.numpy())
def test_M_dropout_static_diff_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@ | jit.trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + R.gaussian(5)
R.manual_seed(0)
b = R.uniform(5) + R.gaussian(5)
assert np.all(a.numpy() == b.numpy())
def test_dropout_dynamic_diff_result():
x = mge.ones(10)
a = F.dropout(x, 0.5)
b = F.dropout(x, 0.5)
assert np.any(a.numpy() != b.numpy())
def test_dropout_dynamic_same_result():
x = mge.ones(10)
R.manual_seed(0)
a = F.dropout(x, 0.5)
R.manual_seed(0)
b = F.dropout(x, 0.5)
assert np.all(a.numpy() == b.numpy())
def test_M_dropout_static_diff_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
a = graph_a(x)
a = a.numpy().copy()
b = graph_b(x)
c = graph_a(x)
assert np.any(a != b.numpy())
assert np.any(a != c.numpy())
def test_M_dropout_static_same_result():
m = | M.Dropout(0.5) | megengine.module.Dropout |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + R.gaussian(5)
R.manual_seed(0)
b = R.uniform(5) + R.gaussian(5)
assert np.all(a.numpy() == b.numpy())
def test_dropout_dynamic_diff_result():
x = mge.ones(10)
a = F.dropout(x, 0.5)
b = F.dropout(x, 0.5)
assert np.any(a.numpy() != b.numpy())
def test_dropout_dynamic_same_result():
x = mge.ones(10)
R.manual_seed(0)
a = F.dropout(x, 0.5)
R.manual_seed(0)
b = F.dropout(x, 0.5)
assert np.all(a.numpy() == b.numpy())
def test_M_dropout_static_diff_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
a = graph_a(x)
a = a.numpy().copy()
b = graph_b(x)
c = graph_a(x)
assert np.any(a != b.numpy())
assert np.any(a != c.numpy())
def test_M_dropout_static_same_result():
m = M.Dropout(0.5)
@ | jit.trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + R.gaussian(5)
R.manual_seed(0)
b = R.uniform(5) + R.gaussian(5)
assert np.all(a.numpy() == b.numpy())
def test_dropout_dynamic_diff_result():
x = mge.ones(10)
a = F.dropout(x, 0.5)
b = F.dropout(x, 0.5)
assert np.any(a.numpy() != b.numpy())
def test_dropout_dynamic_same_result():
x = mge.ones(10)
R.manual_seed(0)
a = F.dropout(x, 0.5)
R.manual_seed(0)
b = F.dropout(x, 0.5)
assert np.all(a.numpy() == b.numpy())
def test_M_dropout_static_diff_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
a = graph_a(x)
a = a.numpy().copy()
b = graph_b(x)
c = graph_a(x)
assert np.any(a != b.numpy())
assert np.any(a != c.numpy())
def test_M_dropout_static_same_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@ | jit.trace(symbolic=True) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + R.gaussian(5)
R.manual_seed(0)
b = R.uniform(5) + R.gaussian(5)
assert np.all(a.numpy() == b.numpy())
def test_dropout_dynamic_diff_result():
x = mge.ones(10)
a = F.dropout(x, 0.5)
b = F.dropout(x, 0.5)
assert np.any(a.numpy() != b.numpy())
def test_dropout_dynamic_same_result():
x = mge.ones(10)
R.manual_seed(0)
a = F.dropout(x, 0.5)
R.manual_seed(0)
b = F.dropout(x, 0.5)
assert np.all(a.numpy() == b.numpy())
def test_M_dropout_static_diff_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
a = graph_a(x)
a = a.numpy().copy()
b = graph_b(x)
c = graph_a(x)
assert np.any(a != b.numpy())
assert np.any(a != c.numpy())
def test_M_dropout_static_same_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
| R.manual_seed(0) | megengine.random.manual_seed |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + R.gaussian(5)
R.manual_seed(0)
b = R.uniform(5) + R.gaussian(5)
assert np.all(a.numpy() == b.numpy())
def test_dropout_dynamic_diff_result():
x = mge.ones(10)
a = F.dropout(x, 0.5)
b = F.dropout(x, 0.5)
assert np.any(a.numpy() != b.numpy())
def test_dropout_dynamic_same_result():
x = mge.ones(10)
R.manual_seed(0)
a = F.dropout(x, 0.5)
R.manual_seed(0)
b = F.dropout(x, 0.5)
assert np.all(a.numpy() == b.numpy())
def test_M_dropout_static_diff_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
a = graph_a(x)
a = a.numpy().copy()
b = graph_b(x)
c = graph_a(x)
assert np.any(a != b.numpy())
assert np.any(a != c.numpy())
def test_M_dropout_static_same_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
R.manual_seed(0)
a = graph_a(x)
a = a.numpy().copy()
| R.manual_seed(0) | megengine.random.manual_seed |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + R.gaussian(5)
R.manual_seed(0)
b = R.uniform(5) + R.gaussian(5)
assert np.all(a.numpy() == b.numpy())
def test_dropout_dynamic_diff_result():
x = mge.ones(10)
a = F.dropout(x, 0.5)
b = F.dropout(x, 0.5)
assert np.any(a.numpy() != b.numpy())
def test_dropout_dynamic_same_result():
x = mge.ones(10)
R.manual_seed(0)
a = F.dropout(x, 0.5)
R.manual_seed(0)
b = F.dropout(x, 0.5)
assert np.all(a.numpy() == b.numpy())
def test_M_dropout_static_diff_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
a = graph_a(x)
a = a.numpy().copy()
b = graph_b(x)
c = graph_a(x)
assert np.any(a != b.numpy())
assert np.any(a != c.numpy())
def test_M_dropout_static_same_result():
m = M.Dropout(0.5)
@jit.trace(symbolic=True)
def graph_a(x):
return m(x)
@jit.trace(symbolic=True)
def graph_b(x):
return m(x)
x = np.ones(10, dtype="float32")
R.manual_seed(0)
a = graph_a(x)
a = a.numpy().copy()
R.manual_seed(0)
b = graph_b(x)
| R.manual_seed(0) | megengine.random.manual_seed |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
| R.manual_seed(731) | megengine.random.manual_seed |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
| R.manual_seed(731) | megengine.random.manual_seed |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = | R.uniform(5) | megengine.random.uniform |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + | R.gaussian(5) | megengine.random.gaussian |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = | R.uniform(5) | megengine.random.uniform |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + | R.gaussian(5) | megengine.random.gaussian |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = | R.uniform(5) | megengine.random.uniform |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + | R.gaussian(5) | megengine.random.gaussian |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + R.gaussian(5)
R.manual_seed(0)
b = | R.uniform(5) | megengine.random.uniform |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.all(a.numpy() == b.numpy())
def test_random_dynamic_diff_result():
a = R.uniform(5) + R.gaussian(5)
b = R.uniform(5) + R.gaussian(5)
assert np.any(a.numpy() != b.numpy())
def test_random_dynamic_same_result():
R.manual_seed(0)
a = R.uniform(5) + R.gaussian(5)
R.manual_seed(0)
b = R.uniform(5) + | R.gaussian(5) | megengine.random.gaussian |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return | R.uniform(5) | megengine.random.uniform |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + | R.gaussian(5) | megengine.random.gaussian |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return | R.uniform(5) | megengine.random.uniform |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + | R.gaussian(5) | megengine.random.gaussian |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return | R.uniform(5) | megengine.random.uniform |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + | R.gaussian(5) | megengine.random.gaussian |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return | R.uniform(5) | megengine.random.uniform |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit
import megengine.module as M
import megengine.random as R
def test_random_static_diff_result():
@jit.trace(symbolic=True)
def graph_a():
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
return R.uniform(5) + R.gaussian(5)
a = graph_a()
b = graph_b()
assert np.any(a.numpy() != b.numpy())
def test_random_static_same_result():
@jit.trace(symbolic=True)
def graph_a():
R.manual_seed(731)
return R.uniform(5) + R.gaussian(5)
@jit.trace(symbolic=True)
def graph_b():
R.manual_seed(731)
return R.uniform(5) + | R.gaussian(5) | megengine.random.gaussian |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = | mge.get_logger(__name__) | megengine.get_logger |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
| mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0) | megengine.device.set_prealloc_config |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = | GradManager() | megengine.autodiff.GradManager |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@ | amp.autocast(enabled=args.mode == "mp") | megengine.amp.autocast |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = | dist.launcher(worker, n_gpus=args.ngpus) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if | dist.get_world_size() | megengine.distributed.get_world_size |
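# ---- illustrative aside (not part of the dataset row above) ----
# The branch above wires up data parallelism in two steps: an allreduce
# callback that sums gradients across ranks, plus (next in the script) a
# broadcast so every rank starts from rank 0's weights. A sketch of that
# wiring as one helper; `params` stands in for params_with_grad.
import megengine.distributed as dist
from megengine.autodiff import GradManager

def attach_with_sync(gm, params):
    if dist.get_world_size() > 1:
        # sum every parameter's gradient over all ranks after backward()
        gm.attach(params, callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)])
        dist.bcast_list_(params, dist.WORLD)  # start from identical weights
    else:
        gm.attach(params)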
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
        assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
    with open(data_path, "rb") as f:  # avoid leaking the file handle
        mini_batch = pickle.load(f)
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
_ = [loss.numpy() for loss in loss_list]
for step in range(args.steps):
tik = time.time()
if data_queue:
mini_batch = next(data_queue)
data_tok = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
loss_meter.update([loss.numpy().item() for loss in loss_list])
tok = time.time()
time_meter.update([tok - tik, data_tok - tik])
if step % args.print_freq == 0 and dist.get_rank() == 0:
print(
"Step {}, Loss ({}), Time (tot:{:.3f}, data:{:.3f})".format(
step,
"".join(["{:.3f} ".format(t) for t in loss_meter.average()]),
*time_meter.average(),
))
loss_meter.reset()
if | dist.get_rank() | megengine.distributed.get_rank |
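# ---- illustrative aside (not part of the dataset row above) ----
# AverageMeter comes from tools.utils and is not shown in this dump. A
# plausible minimal implementation matching the update()/average()/reset()
# calls above might look like this (an assumption, not the real tools.utils
# code).
import numpy as np

class AverageMeterSketch:
    def __init__(self, record_len=1):
        self.record_len = record_len
        self.reset()

    def reset(self):
        self._sum = np.zeros(self.record_len, dtype="float64")
        self._count = 0

    def update(self, values):
        assert len(values) == self.record_len
        self._sum += np.asarray(values, dtype="float64")
        self._count += 1

    def average(self):
        return (self._sum / max(self._count, 1)).tolist()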
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = | jit.trace(train_func, symbolic=False, symbolic_shape=False) | megengine.jit.trace |
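# ---- illustrative aside (not part of the dataset row above) ----
# A minimal sketch of the two trace flavors toggled by --trace/--symbolic
# above. The traced function is a toy; only the jit.trace flags mirror the
# benchmark code.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit

def double_relu(x):
    return F.relu(x) * 2.0

eager_trace = jit.trace(double_relu, symbolic=False)  # record eager ops
graph_trace = jit.trace(double_relu, symbolic=True)   # build a static graph

x = mge.tensor(np.ones((2, 3), dtype="float32"))
print(eager_trace(x).numpy())
print(graph_trace(x).numpy())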
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
        assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * | dist.get_world_size() | megengine.distributed.get_world_size |
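# ---- illustrative aside (not part of the dataset row above) ----
# tot_step above is plain integer arithmetic over assumed config values:
# with nr_images_epoch = 80000, batch_size = 2 and 8 GPUs, each process
# sees 80000 // (2 * 8) = 5000 steps per epoch.
nr_images_epoch = 80000            # assumed value for illustration
batch_size, world_size = 2, 8
tot_step_demo = nr_images_epoch // (batch_size * world_size)
assert tot_step_demo == 5000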
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
        assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
    with open(data_path, "rb") as f:  # avoid leaking the file handle
        mini_batch = pickle.load(f)
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
_ = [loss.numpy() for loss in loss_list]
for step in range(args.steps):
tik = time.time()
if data_queue:
mini_batch = next(data_queue)
data_tok = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
loss_meter.update([loss.numpy().item() for loss in loss_list])
tok = time.time()
time_meter.update([tok - tik, data_tok - tik])
if step % args.print_freq == 0 and dist.get_rank() == 0:
print(
"Step {}, Loss ({}), Time (tot:{:.3f}, data:{:.3f})".format(
step,
"".join(["{:.3f} ".format(t) for t in loss_meter.average()]),
*time_meter.average(),
))
loss_meter.reset()
if dist.get_rank() == 0:
print("="*20, "summary", "="*20)
print(" benchmark: detection")
if args.trace:
print(" mode: trace(symbolic={})".format("True, sublinear=True" if args.symbolic else "False"))
else:
print(" mode: imperative")
print(" loader: {}".format("" if not args.loader else "--loader"))
if args.loader:
print(" preload: {}".format("" if not args.preload else "--preload"))
print(" arch: {}".format(args.arch))
print("train_mode: {}".format(args.mode))
print(" batchsize: {}".format(args.batch_size))
print(" #GPU: {}".format(args.ngpus))
print(" avg time: {:.3f} seconds".format(time_meter.average()[0]))
# pylint: disable=unused-argument
def build_dataset(dataset_dir, cfg):
return PseudoDetectionDataset(order=["image", "boxes", "boxes_category", "info"])
# pylint: disable=dangerous-default-value
def build_sampler(train_dataset, batch_size, aspect_grouping=[1]):
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
info = dataset.get_img_info(i)
aspect_ratios.append(info["height"] / info["width"])
return aspect_ratios
def _quantize(x, bins):
return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))
if len(aspect_grouping) == 0:
return Infinite( | RandomSampler(train_dataset, batch_size, drop_last=True) | megengine.data.RandomSampler |
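# ---- illustrative aside (not part of the dataset row above) ----
# What _quantize above computes, on toy numbers: each aspect ratio maps to
# the index of the bin it falls into, so similarly shaped images can be
# grouped into the same batch. With bins=[1], ratios below 1 land in group
# 0 and the rest in group 1.
import bisect

ratios = [0.5, 0.9, 1.0, 1.7]      # height / width, assumed values
bins = [1]
groups = [bisect.bisect_right(sorted(bins), r) for r in ratios]
assert groups == [0, 0, 1, 1]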
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * | dist.get_world_size() | megengine.distributed.get_world_size |
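# ---- illustrative aside (not part of the dataset row above) ----
# The SGD hyper-parameters above follow a linear scaling pattern: lr grows
# with the per-process batch size, weight decay with the number of
# processes. The config values below are assumptions for illustration.
basic_lr, weight_decay = 0.01 / 16, 1e-4   # assumed config values
batch_size, world_size = 2, 8
lr = basic_lr * batch_size                 # 0.00125
wd = weight_decay * world_size             # 0.0008
print(lr, wd)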
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
        assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
    with open(data_path, "rb") as f:  # avoid leaking the file handle
        mini_batch = pickle.load(f)
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image= | mge.tensor(mini_batch["data"]) | megengine.tensor |
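# ---- illustrative aside (not part of the dataset row above) ----
# The batch.pkl replication above, on toy arrays: a pickled batch of 2 is
# tiled and truncated to the requested batch size, then wrapped in
# mge.tensor before being fed to train_func. A batch size of 3 is assumed
# here.
import numpy as np
import megengine as mge

data2 = np.arange(8, dtype="float32").reshape(2, 2, 2)  # assumed batch of 2
target_bs = 3
repeats = (target_bs + 1) // 2
data3 = np.concatenate([data2] * repeats)[:target_bs]
assert data3.shape[0] == target_bs
image = mge.tensor(data3)   # device tensor handed to the model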
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
        assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
    with open(data_path, "rb") as f:  # avoid leaking the file handle
        mini_batch = pickle.load(f)
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info= | mge.tensor(mini_batch["im_info"]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
        assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
    with open(data_path, "rb") as f:  # avoid leaking the file handle
        mini_batch = pickle.load(f)
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes= | mge.tensor(mini_batch["gt_boxes"]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
        assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
    with open(data_path, "rb") as f:  # avoid leaking the file handle
        mini_batch = pickle.load(f)
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
_ = [loss.numpy() for loss in loss_list]
for step in range(args.steps):
tik = time.time()
if data_queue:
mini_batch = next(data_queue)
data_tok = time.time()
loss_list = train_func(
image= | mge.tensor(mini_batch["data"]) | megengine.tensor |
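# ---- illustrative aside (not part of the dataset row above) ----
# MegEngine executes asynchronously, which is why the loops above call
# loss.numpy() before reading the wall clock: .numpy() blocks until the
# value has actually been computed. A toy version of that timing pattern:
import time
import numpy as np
import megengine as mge
import megengine.functional as F

x = mge.tensor(np.random.randn(1024, 1024).astype("float32"))
tik = time.time()
y = F.matmul(x, x)
_ = y.numpy()               # host sync: wait for the kernel to finish
print("elapsed: {:.3f}s".format(time.time() - tik))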
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
        assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
    with open(data_path, "rb") as f:  # avoid leaking the file handle
        mini_batch = pickle.load(f)
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
_ = [loss.numpy() for loss in loss_list]
for step in range(args.steps):
tik = time.time()
if data_queue:
mini_batch = next(data_queue)
data_tok = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info= | mge.tensor(mini_batch["im_info"]) | megengine.tensor |
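# ---- illustrative aside (not part of the dataset row above) ----
# A minimal sketch of the amp.autocast toggle behind --mode mp above:
# inside the context (or under the decorator), eligible float32 ops run in
# float16. The input and conv weight are assumptions for illustration.
import numpy as np
import megengine as mge
import megengine.amp as amp
import megengine.functional as F

x = mge.tensor(np.random.randn(1, 3, 8, 8).astype("float32"))
w = mge.tensor(np.random.randn(4, 3, 3, 3).astype("float32"))
with amp.autocast(enabled=True):
    y = F.conv2d(x, w)      # runs in fp16 under autocast
print(y.dtype)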
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
assert args.symbolic==False, "invalid arguments: trace=Trace, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
mini_batch = pickle.load(open(data_path, "rb"))
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
_ = [loss.numpy() for loss in loss_list]
for step in range(args.steps):
tik = time.time()
if data_queue:
mini_batch = next(data_queue)
data_tok = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes= | mge.tensor(mini_batch["gt_boxes"]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
assert args.symbolic==False, "invalid arguments: trace=Trace, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
mini_batch = pickle.load(open(data_path, "rb"))
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
_ = [loss.numpy() for loss in loss_list]
for step in range(args.steps):
tik = time.time()
if data_queue:
mini_batch = next(data_queue)
data_tok = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
loss_meter.update([loss.numpy().item() for loss in loss_list])
tok = time.time()
time_meter.update([tok - tik, data_tok - tik])
if step % args.print_freq == 0 and | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[ | dist.make_allreduce_cb("SUM", dist.WORLD) | megengine.distributed.make_allreduce_cb |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config= | jit.SublinearMemoryConfig(genetic_nr_iter=50) | megengine.jit.SublinearMemoryConfig |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
    AverageMeter,
    DetectionPadCollator,
    GroupedRandomSampler,
    PseudoDetectionDataset,
    get_config_info,
    import_from_file,
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-a", "--arch", type=str, help="model architecture",
    )
    parser.add_argument(
        "-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
    )
    parser.add_argument(
        "-b", "--batch-size", default=2, type=int, help="batch size for training",
    )
    parser.add_argument(
        "-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
    )
    parser.add_argument(
        "--trace",
        action="store_true",
        default=False,
        help="whether to use jit.trace (default: False)",
    )
    parser.add_argument(
        "--preloader",
        action="store_true",
        default=False,
        help="whether to use a preloader (default: False)",
    )
    parser.add_argument(
        "--symbolic",
        action="store_true",
        default=False,
        help="whether to trace in symbolic mode; requires --trace (default: False)",
    )
    parser.add_argument(
        "-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
    )
    parser.add_argument(
        "-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
    )
    parser.add_argument(
        "-m",
        "--mode",
        default="normal",
        type=str,
        choices=["normal", "mp"],
        help="training precision mode\n"
        "normal: pure float32 training\n"
        "mp: mixed precision (inputs cast to fp16 via amp.autocast)",
    )
    parser.add_argument("--preload", default=False, action="store_true", help="whether to use preload")
    return parser
def main():
    parser = make_parser()
    args = parser.parse_args()
    # ------------------------ begin training -------------------------- #
    if args.ngpus > 1:
        train_func = dist.launcher(worker, n_gpus=args.ngpus)
        train_func(args)
    else:
        worker(args)
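# dist.launcher spawns one process per GPU and runs `worker` in each; with a
# single GPU the worker is simply called in-process.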
def worker(args):
    config_file = {
        "faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
        "atss": "configs/atss_res50_coco_1x_800size.py",
        "retinanet": "configs/retinanet_res50_coco_1x_800size.py",
    }[args.arch]
    current_network = import_from_file(config_file)
    model = current_network.Net(current_network.Cfg())
    model.train()
    # if dist.get_rank() == 0:
    #     logger.info(get_config_info(model.cfg))
    #     logger.info(repr(model))
    params_with_grad = []
    for name, param in model.named_parameters():
        if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
            continue
        if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
            continue
        params_with_grad.append(param)
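    # Linear LR scaling: the base learning rate is multiplied by the per-GPU
    # batch size, and weight decay is scaled by world size so the effective
    # regularization matches single-GPU training after the gradient all-reduce.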
    opt = SGD(
        params_with_grad,
        lr=model.cfg.basic_lr * args.batch_size,
        momentum=model.cfg.momentum,
        weight_decay=model.cfg.weight_decay * dist.get_world_size(),
    )
    gm = GradManager()
    if dist.get_world_size() > 1:
        gm.attach(
            params_with_grad,
            callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)],
        )
    else:
        gm.attach(params_with_grad)
    if dist.get_world_size() > 1:
        dist.bcast_list_(model.parameters(), dist.WORLD)  # sync parameters
    train_loader = None
    for epoch in range(1):
        train_one_epoch(model, train_loader, opt, gm, epoch, args)
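# Everything executed under `with gm:` below is recorded for autodiff;
# gm.backward() then accumulates gradients into the attached parameters,
# all-reducing across ranks via the callback registered above. The
# amp.autocast decorator casts eligible ops to fp16 when --mode mp is set.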
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
    @amp.autocast(enabled=args.mode == "mp")
    def train_func(image, im_info, gt_boxes):
        with gm:
            loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
            gm.backward(loss_dict["total_loss"])
            loss_list = list(loss_dict.values())
        opt.step().clear_grad()
        return loss_list
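    # jit.trace compiles train_func into a graph on first call. symbolic=True
    # builds the whole graph ahead of execution and enables a sublinear-memory
    # plan (trading recomputation for memory, searched here with 50 genetic
    # iterations); symbolic=False traces imperatively, op by op.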
    if args.trace:
        if args.symbolic:
            train_func = jit.trace(
                train_func,
                symbolic=True,
                sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50),
                symbolic_shape=True,
            )
        else:
            train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
    else:
        assert not args.symbolic, "invalid arguments: --symbolic requires --trace"
    loss_meter = AverageMeter(record_len=model.cfg.num_losses)
    time_meter = AverageMeter(record_len=2)
    log_interval = model.cfg.log_interval
    tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
    file_dir = os.path.dirname(os.path.abspath(__file__))
    data_path = os.path.join(file_dir, "batch.pkl")  # batch.pkl was dumped with batch_size=2
    with open(data_path, "rb") as f:
        mini_batch = pickle.load(f)
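    # The pickled batch holds exactly two samples, so for any other batch size
    # the arrays are tiled (ceil(batch_size / 2) repeats) and then truncated.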
    if args.batch_size != 2:
        repeats = (args.batch_size + 1) // 2
        for key in ("data", "im_info", "gt_boxes"):
            mini_batch[key] = np.concatenate([mini_batch[key]] * repeats)[: args.batch_size]
# warm up
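    # A few untimed iterations let jit.trace compile, cudnn pick algorithms, and
    # the memory pool stabilize; calling loss.numpy() blocks until the device
    # finishes, so later timings measure full steps rather than kernel launches.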
    for step in range(10):
        if data_queue:
            mini_batch = next(data_queue)
        loss_list = train_func(
            image=mge.tensor(mini_batch["data"]),
            im_info=mge.tensor(mini_batch["im_info"]),
            gt_boxes=mge.tensor(mini_batch["gt_boxes"]),
        )
        _ = [loss.numpy() for loss in loss_list]
    for step in range(args.steps):
        tik = time.time()
        if data_queue:
            mini_batch = next(data_queue)
        data_tok = time.time()
        loss_list = train_func(
            image=mge.tensor(mini_batch["data"]),
            im_info=mge.tensor(mini_batch["im_info"]),
            gt_boxes=mge.tensor(mini_batch["gt_boxes"]),
        )
        loss_meter.update([loss.numpy().item() for loss in loss_list])
        tok = time.time()
        time_meter.update([tok - tik, data_tok - tik])
        if step % args.print_freq == 0 and dist.get_rank() == 0:
            print(
                "Step {}, Loss ({}), Time (tot:{:.3f}, data:{:.3f})".format(
                    step,
                    "".join(["{:.3f} ".format(t) for t in loss_meter.average()]),
                    *time_meter.average(),
                )
            )
            loss_meter.reset()
    if dist.get_rank() == 0:
        print("=" * 20, "summary", "=" * 20)
        print(" benchmark: detection")
        if args.trace:
            print(" mode: trace(symbolic={})".format("True, sublinear=True" if args.symbolic else "False"))
        else:
            print(" mode: imperative")
        print(" loader: {}".format("" if not args.loader else "--loader"))
        if args.loader:
            print(" preload: {}".format("" if not args.preload else "--preload"))
        print(" arch: {}".format(args.arch))
        print("train_mode: {}".format(args.mode))
        print(" batchsize: {}".format(args.batch_size))
        print(" #GPU: {}".format(args.ngpus))
        print(" avg time: {:.3f} seconds".format(time_meter.average()[0]))
# pylint: disable=unused-argument
def build_dataset(dataset_dir, cfg):
    return PseudoDetectionDataset(order=["image", "boxes", "boxes_category", "info"])
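# Aspect-ratio grouping batches portrait images with portrait and landscape
# with landscape, so the padding done by DetectionPadCollator wastes less
# memory: each image's height/width ratio is bucketed with bisect against the
# `aspect_grouping` thresholds, and batches are drawn within a bucket.
# Infinite(...) wraps the sampler into a never-ending stream, suiting the
# step-based (rather than epoch-based) loop above.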
# pylint: disable=dangerous-default-value
def build_sampler(train_dataset, batch_size, aspect_grouping=[1]):
    def _compute_aspect_ratios(dataset):
        aspect_ratios = []
        for i in range(len(dataset)):
            info = dataset.get_img_info(i)
            aspect_ratios.append(info["height"] / info["width"])
        return aspect_ratios

    def _quantize(x, bins):
        return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))

    if len(aspect_grouping) == 0:
        return Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
    aspect_ratios = _compute_aspect_ratios(train_dataset)
    group_ids = _quantize(aspect_ratios, aspect_grouping)
    return Infinite(GroupedRandomSampler(train_dataset, batch_size, group_ids))
def build_dataloader(batch_size, dataset_dir, cfg, preloader=False):
    train_dataset = build_dataset(dataset_dir, cfg)
    train_sampler = build_sampler(train_dataset, batch_size)
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        transform=T.Compose(
            transforms=[
                T.ShortestEdgeResize(
                    cfg.train_image_short_size,
                    cfg.train_image_max_size,
                    sample_style="choice",
                ),
                T.RandomHorizontalFlip(),
                # NOTE: the source file is truncated at this point; the
                # remainder below is an assumption, following the standard
                # MegEngine detection-training pattern.
                T.ToMode(),
            ],
            order=["image", "boxes", "boxes_category"],
        ),
        collator=DetectionPadCollator(),
        num_workers=2,
    )
    return train_dataloader
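# Assumed script entry point -- the original file is truncated above, but the
# argparse/main structure implies the conventional guard:
if __name__ == "__main__":
    main()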