prompt
stringlengths 94
42.6k
| completion
stringlengths 6
120
| api
stringlengths 14
68
|
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line arguments and launch the calibration worker.

    Decides the process count from ``--ngpus`` (or the visible GPU count)
    and dispatches to :func:`worker`, wrapped in ``dist.launcher`` when
    more than one process is requested.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()
    # Default to one process per visible GPU when --ngpus is not given.
    world_size = (
        dist.helper.get_device_count_by_fork("gpu")
        if args.ngpus is None
        else args.ngpus
    )
    world_size = 1 if world_size == 0 else world_size  # no GPU -> single process
    if world_size != 1:
        # Multi-GPU is allowed to proceed but is warned against.
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    proc_func = dist.launcher(worker) if world_size > 1 else worker
    proc_func(world_size, args)
def get_parameters(model, cfg):
    """Return optimizer parameter group(s) derived from ``cfg.WEIGHT_DECAY``.

    A numeric ``cfg.WEIGHT_DECAY`` yields a single group dict covering all
    trainable parameters.  Otherwise ``cfg.WEIGHT_DECAY`` is assumed to be a
    callable ``(param_name, param) -> decay`` and a list of group dicts is
    returned, one per distinct decay value.
    """
    if isinstance(cfg.WEIGHT_DECAY, numbers.Number):
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": cfg.WEIGHT_DECAY,
        }
    groups = collections.defaultdict(list)  # weight_decay -> List[param]
    for pname, p in model.named_parameters(requires_grad=True):
        wd = cfg.WEIGHT_DECAY(pname, p)
        groups[wd].append(p)
    groups = [
        {"params": params, "weight_decay": wd} for wd, params in groups.items()
    ]  # List[{param, weight_decay}]
    return groups
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
infer(calculate_scale, valid_queue, args)
# quantized
model = quantize(model)
# eval quantized model
def eval_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if | dist.is_distributed() | megengine.distributed.is_distributed |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Entry point: parse CLI options and start the calibration worker(s)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    # Process count: explicit --ngpus wins, otherwise one per visible GPU.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        world_size = 1  # no GPU detected: fall back to a single process
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )

    # Wrap in the distributed launcher only when multiple processes are wanted.
    run = dist.launcher(worker) if world_size > 1 else worker
    run(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter group(s) from ``cfg.WEIGHT_DECAY``.

    When ``cfg.WEIGHT_DECAY`` is a plain number, a single group dict is
    returned holding every trainable parameter with that decay.  Otherwise
    it is treated as a callable ``(param_name, param) -> decay`` and the
    parameters are bucketed by the decay value it assigns, producing a list
    of ``{"params": ..., "weight_decay": ...}`` dicts.
    """
    if isinstance(cfg.WEIGHT_DECAY, numbers.Number):
        # Uniform decay: one group covering all trainable parameters.
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": cfg.WEIGHT_DECAY,
        }

    buckets = collections.defaultdict(list)  # decay value -> list of params
    for name, param in model.named_parameters(requires_grad=True):
        buckets[cfg.WEIGHT_DECAY(name, param)].append(param)
    return [
        {"params": plist, "weight_decay": decay}
        for decay, plist in buckets.items()
    ]
def worker(world_size, args):
    """Calibrate, quantize, evaluate and save the model (single process).

    Pipeline: load the pretrained fp32 checkpoint, run observer-based
    calibration over the ImageNet validation set to collect activation
    statistics, convert the model to a true quantized one, report
    top-1/top-5 accuracy, and write the quantized checkpoint to disk.
    """
    # pylint: disable=too-many-statements
    rank = dist.get_rank()
    if world_size > 1:
        # Initialize distributed process group
        # NOTE(review): only a log line here — presumably dist.launcher in
        # main() performs the actual group setup; confirm against megengine.
        logger.info("init distributed process group {} / {}".format(rank, world_size))
    # All outputs (log file + quantized checkpoint) go under
    # <args.save>/<args.arch>.calibration/
    save_dir = os.path.join(args.save, args.arch + "." + "calibration")
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    mge.set_log_file(os.path.join(save_dir, "log.txt"))
    model = models.__dict__[args.arch]()
    # load calibration model
    assert args.checkpoint
    logger.info("Load pretrained weights from %s", args.checkpoint)
    ckpt = mge.load(args.checkpoint)
    # Accept either a raw state dict or a {"state_dict": ...} wrapper.
    ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
    model.load_state_dict(ckpt, strict=False)
    # Build valid datasets
    valid_dataset = data.dataset.ImageNet(args.data, train=False)
    valid_sampler = data.SequentialSampler(
        valid_dataset, batch_size=100, drop_last=False
    )
    valid_queue = data.DataLoader(
        valid_dataset,
        sampler=valid_sampler,
        transform=T.Compose(
            [T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
        ),
        num_workers=args.workers,
    )
    # calibration
    # The final fc layer is kept unquantized; everything else gets observers.
    model.fc.disable_quantize()
    model = quantize_qat(model, qconfig=Q.calibration_qconfig)
    # calculate scale
    def calculate_scale(image, label):
        """Forward pass with observers enabled to collect activation stats."""
        model.eval()
        enable_observer(model)
        logits = model(image)
        loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
        acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
        if dist.is_distributed():  # all_reduce_mean
            loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
            acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
            acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
        return loss, acc1, acc5
    infer(calculate_scale, valid_queue, args)
    # quantized
    model = quantize(model)
    # eval quantized model
    def eval_func(image, label):
        """Forward pass on the fully quantized model (no observers)."""
        model.eval()
        logits = model(image)
        loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
        acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
        if dist.is_distributed():  # all_reduce_mean
            loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
            acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
            acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
        return loss, acc1, acc5
    _, valid_acc, valid_acc5 = infer(eval_func, valid_queue, args)
    logger.info("TEST %f, %f", valid_acc, valid_acc5)
    # save quantized model
    mge.save(
        {"step": -1, "state_dict": model.state_dict()},
        os.path.join(save_dir, "checkpoint-calibration.pkl"),
    )
    logger.info(
        "save in {}".format(os.path.join(save_dir, "checkpoint-calibration.pkl"))
    )
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = | mge.tensor(image, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
if world_size != 1:
logger.warning(
"Calibration only supports single GPU now, %d provided", world_size
)
proc_func = dist.launcher(worker) if world_size > 1 else worker
proc_func(world_size, args)
def get_parameters(model, cfg):
if isinstance(cfg.WEIGHT_DECAY, numbers.Number):
return {
"params": model.parameters(requires_grad=True),
"weight_decay": cfg.WEIGHT_DECAY,
}
groups = collections.defaultdict(list) # weight_decay -> List[param]
for pname, p in model.named_parameters(requires_grad=True):
wd = cfg.WEIGHT_DECAY(pname, p)
groups[wd].append(p)
groups = [
{"params": params, "weight_decay": wd} for wd, params in groups.items()
] # List[{param, weight_decay}]
return groups
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
infer(calculate_scale, valid_queue, args)
# quantized
model = quantize(model)
# eval quantized model
def eval_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
_, valid_acc, valid_acc5 = infer(eval_func, valid_queue, args)
logger.info("TEST %f, %f", valid_acc, valid_acc5)
# save quantized model
mge.save(
{"step": -1, "state_dict": model.state_dict()},
os.path.join(save_dir, "checkpoint-calibration.pkl"),
)
logger.info(
"save in {}".format(os.path.join(save_dir, "checkpoint-calibration.pkl"))
)
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = mge.tensor(image, dtype="float32")
label = | mge.tensor(label, dtype="int32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
if world_size != 1:
logger.warning(
"Calibration only supports single GPU now, %d provided", world_size
)
proc_func = dist.launcher(worker) if world_size > 1 else worker
proc_func(world_size, args)
def get_parameters(model, cfg):
if isinstance(cfg.WEIGHT_DECAY, numbers.Number):
return {
"params": model.parameters(requires_grad=True),
"weight_decay": cfg.WEIGHT_DECAY,
}
groups = collections.defaultdict(list) # weight_decay -> List[param]
for pname, p in model.named_parameters(requires_grad=True):
wd = cfg.WEIGHT_DECAY(pname, p)
groups[wd].append(p)
groups = [
{"params": params, "weight_decay": wd} for wd, params in groups.items()
] # List[{param, weight_decay}]
return groups
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = | dist.functional.all_reduce_sum(loss) | megengine.distributed.functional.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
if world_size != 1:
logger.warning(
"Calibration only supports single GPU now, %d provided", world_size
)
proc_func = dist.launcher(worker) if world_size > 1 else worker
proc_func(world_size, args)
def get_parameters(model, cfg):
if isinstance(cfg.WEIGHT_DECAY, numbers.Number):
return {
"params": model.parameters(requires_grad=True),
"weight_decay": cfg.WEIGHT_DECAY,
}
groups = collections.defaultdict(list) # weight_decay -> List[param]
for pname, p in model.named_parameters(requires_grad=True):
wd = cfg.WEIGHT_DECAY(pname, p)
groups[wd].append(p)
groups = [
{"params": params, "weight_decay": wd} for wd, params in groups.items()
] # List[{param, weight_decay}]
return groups
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
if world_size != 1:
logger.warning(
"Calibration only supports single GPU now, %d provided", world_size
)
proc_func = dist.launcher(worker) if world_size > 1 else worker
proc_func(world_size, args)
def get_parameters(model, cfg):
if isinstance(cfg.WEIGHT_DECAY, numbers.Number):
return {
"params": model.parameters(requires_grad=True),
"weight_decay": cfg.WEIGHT_DECAY,
}
groups = collections.defaultdict(list) # weight_decay -> List[param]
for pname, p in model.named_parameters(requires_grad=True):
wd = cfg.WEIGHT_DECAY(pname, p)
groups[wd].append(p)
groups = [
{"params": params, "weight_decay": wd} for wd, params in groups.items()
] # List[{param, weight_decay}]
return groups
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = | dist.functional.all_reduce_sum(acc1) | megengine.distributed.functional.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
if world_size != 1:
logger.warning(
"Calibration only supports single GPU now, %d provided", world_size
)
proc_func = dist.launcher(worker) if world_size > 1 else worker
proc_func(world_size, args)
def get_parameters(model, cfg):
if isinstance(cfg.WEIGHT_DECAY, numbers.Number):
return {
"params": model.parameters(requires_grad=True),
"weight_decay": cfg.WEIGHT_DECAY,
}
groups = collections.defaultdict(list) # weight_decay -> List[param]
for pname, p in model.named_parameters(requires_grad=True):
wd = cfg.WEIGHT_DECAY(pname, p)
groups[wd].append(p)
groups = [
{"params": params, "weight_decay": wd} for wd, params in groups.items()
] # List[{param, weight_decay}]
return groups
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="resnet18", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-s", "--save", default="/data/models", type=str)
parser.add_argument(
"-c",
"--checkpoint",
default=None,
type=str,
help="pretrained model to finetune",
)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = (
dist.helper.get_device_count_by_fork("gpu")
if args.ngpus is None
else args.ngpus
)
world_size = 1 if world_size == 0 else world_size
if world_size != 1:
logger.warning(
"Calibration only supports single GPU now, %d provided", world_size
)
proc_func = dist.launcher(worker) if world_size > 1 else worker
proc_func(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter groups from ``cfg.WEIGHT_DECAY``.

    When ``cfg.WEIGHT_DECAY`` is a plain number, every trainable parameter
    shares it (a single group dict is returned).  Otherwise it is treated as
    a callable ``(name, param) -> decay`` and parameters are bucketed into
    one group per distinct decay value.
    """
    wd_spec = cfg.WEIGHT_DECAY
    if isinstance(wd_spec, numbers.Number):
        # Uniform decay: one group covers the whole model.
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_spec,
        }
    # Per-parameter decay: bucket params by their assigned decay value
    # (first-occurrence insertion order is preserved by dict semantics).
    buckets = collections.defaultdict(list)
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_spec(name, param)].append(param)
    return [
        {"params": params, "weight_decay": wd} for wd, params in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = | dist.functional.all_reduce_sum(acc5) | megengine.distributed.functional.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    # Detect the GPU count unless the user pinned it explicitly.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        # No GPU detected: fall back to a single process.
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Spawn one process per device only when more than one is requested.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter groups from ``cfg.WEIGHT_DECAY``.

    When ``cfg.WEIGHT_DECAY`` is a plain number, every trainable parameter
    shares it (a single group dict is returned).  Otherwise it is treated as
    a callable ``(name, param) -> decay`` and parameters are bucketed into
    one group per distinct decay value.
    """
    wd_spec = cfg.WEIGHT_DECAY
    if isinstance(wd_spec, numbers.Number):
        # Uniform decay: one group covers the whole model.
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_spec,
        }
    # Per-parameter decay: bucket params by their assigned decay value
    # (first-occurrence insertion order is preserved by dict semantics).
    buckets = collections.defaultdict(list)
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_spec(name, param)].append(param)
    return [
        {"params": params, "weight_decay": wd} for wd, params in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    # Detect the GPU count unless the user pinned it explicitly.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        # No GPU detected: fall back to a single process.
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Spawn one process per device only when more than one is requested.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter groups from ``cfg.WEIGHT_DECAY``.

    When ``cfg.WEIGHT_DECAY`` is a plain number, every trainable parameter
    shares it (a single group dict is returned).  Otherwise it is treated as
    a callable ``(name, param) -> decay`` and parameters are bucketed into
    one group per distinct decay value.
    """
    wd_spec = cfg.WEIGHT_DECAY
    if isinstance(wd_spec, numbers.Number):
        # Uniform decay: one group covers the whole model.
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_spec,
        }
    # Per-parameter decay: bucket params by their assigned decay value
    # (first-occurrence insertion order is preserved by dict semantics).
    buckets = collections.defaultdict(list)
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_spec(name, param)].append(param)
    return [
        {"params": params, "weight_decay": wd} for wd, params in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
infer(calculate_scale, valid_queue, args)
# quantized
model = quantize(model)
# eval quantized model
def eval_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = | dist.functional.all_reduce_sum(loss) | megengine.distributed.functional.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    # Detect the GPU count unless the user pinned it explicitly.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        # No GPU detected: fall back to a single process.
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Spawn one process per device only when more than one is requested.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter groups from ``cfg.WEIGHT_DECAY``.

    When ``cfg.WEIGHT_DECAY`` is a plain number, every trainable parameter
    shares it (a single group dict is returned).  Otherwise it is treated as
    a callable ``(name, param) -> decay`` and parameters are bucketed into
    one group per distinct decay value.
    """
    wd_spec = cfg.WEIGHT_DECAY
    if isinstance(wd_spec, numbers.Number):
        # Uniform decay: one group covers the whole model.
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_spec,
        }
    # Per-parameter decay: bucket params by their assigned decay value
    # (first-occurrence insertion order is preserved by dict semantics).
    buckets = collections.defaultdict(list)
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_spec(name, param)].append(param)
    return [
        {"params": params, "weight_decay": wd} for wd, params in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
infer(calculate_scale, valid_queue, args)
# quantized
model = quantize(model)
# eval quantized model
def eval_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    # Detect the GPU count unless the user pinned it explicitly.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        # No GPU detected: fall back to a single process.
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Spawn one process per device only when more than one is requested.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter groups from ``cfg.WEIGHT_DECAY``.

    When ``cfg.WEIGHT_DECAY`` is a plain number, every trainable parameter
    shares it (a single group dict is returned).  Otherwise it is treated as
    a callable ``(name, param) -> decay`` and parameters are bucketed into
    one group per distinct decay value.
    """
    wd_spec = cfg.WEIGHT_DECAY
    if isinstance(wd_spec, numbers.Number):
        # Uniform decay: one group covers the whole model.
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_spec,
        }
    # Per-parameter decay: bucket params by their assigned decay value
    # (first-occurrence insertion order is preserved by dict semantics).
    buckets = collections.defaultdict(list)
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_spec(name, param)].append(param)
    return [
        {"params": params, "weight_decay": wd} for wd, params in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
infer(calculate_scale, valid_queue, args)
# quantized
model = quantize(model)
# eval quantized model
def eval_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = | dist.functional.all_reduce_sum(acc1) | megengine.distributed.functional.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    # Detect the GPU count unless the user pinned it explicitly.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        # No GPU detected: fall back to a single process.
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Spawn one process per device only when more than one is requested.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter groups from ``cfg.WEIGHT_DECAY``.

    When ``cfg.WEIGHT_DECAY`` is a plain number, every trainable parameter
    shares it (a single group dict is returned).  Otherwise it is treated as
    a callable ``(name, param) -> decay`` and parameters are bucketed into
    one group per distinct decay value.
    """
    wd_spec = cfg.WEIGHT_DECAY
    if isinstance(wd_spec, numbers.Number):
        # Uniform decay: one group covers the whole model.
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_spec,
        }
    # Per-parameter decay: bucket params by their assigned decay value
    # (first-occurrence insertion order is preserved by dict semantics).
    buckets = collections.defaultdict(list)
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_spec(name, param)].append(param)
    return [
        {"params": params, "weight_decay": wd} for wd, params in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
infer(calculate_scale, valid_queue, args)
# quantized
model = quantize(model)
# eval quantized model
def eval_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    # Detect the GPU count unless the user pinned it explicitly.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        # No GPU detected: fall back to a single process.
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Spawn one process per device only when more than one is requested.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter groups from ``cfg.WEIGHT_DECAY``.

    When ``cfg.WEIGHT_DECAY`` is a plain number, every trainable parameter
    shares it (a single group dict is returned).  Otherwise it is treated as
    a callable ``(name, param) -> decay`` and parameters are bucketed into
    one group per distinct decay value.
    """
    wd_spec = cfg.WEIGHT_DECAY
    if isinstance(wd_spec, numbers.Number):
        # Uniform decay: one group covers the whole model.
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_spec,
        }
    # Per-parameter decay: bucket params by their assigned decay value
    # (first-occurrence insertion order is preserved by dict semantics).
    buckets = collections.defaultdict(list)
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_spec(name, param)].append(param)
    return [
        {"params": params, "weight_decay": wd} for wd, params in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
infer(calculate_scale, valid_queue, args)
# quantized
model = quantize(model)
# eval quantized model
def eval_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = | dist.functional.all_reduce_sum(acc5) | megengine.distributed.functional.all_reduce_sum |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    # Detect the GPU count unless the user pinned it explicitly.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        # No GPU detected: fall back to a single process.
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Spawn one process per device only when more than one is requested.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter groups from ``cfg.WEIGHT_DECAY``.

    When ``cfg.WEIGHT_DECAY`` is a plain number, every trainable parameter
    shares it (a single group dict is returned).  Otherwise it is treated as
    a callable ``(name, param) -> decay`` and parameters are bucketed into
    one group per distinct decay value.
    """
    wd_spec = cfg.WEIGHT_DECAY
    if isinstance(wd_spec, numbers.Number):
        # Uniform decay: one group covers the whole model.
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_spec,
        }
    # Per-parameter decay: bucket params by their assigned decay value
    # (first-occurrence insertion order is preserved by dict semantics).
    buckets = collections.defaultdict(list)
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_spec(name, param)].append(param)
    return [
        {"params": params, "weight_decay": wd} for wd, params in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
),
num_workers=args.workers,
)
# calibration
model.fc.disable_quantize()
model = quantize_qat(model, qconfig=Q.calibration_qconfig)
# calculate scale
def calculate_scale(image, label):
model.eval()
enable_observer(model)
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
return loss, acc1, acc5
infer(calculate_scale, valid_queue, args)
# quantized
model = quantize(model)
# eval quantized model
def eval_func(image, label):
model.eval()
logits = model(image)
loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
acc5 = dist.functional.all_reduce_sum(acc5) / | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()
    # Auto-detect GPU count unless the user pinned it with -n.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Multi-GPU goes through the distributed launcher; single GPU runs inline.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter group(s) from ``cfg.WEIGHT_DECAY``.

    If ``cfg.WEIGHT_DECAY`` is a plain number, a single group dict sharing
    that weight decay is returned.  Otherwise it is treated as a callable
    ``(name, param) -> wd`` and parameters are bucketed by the weight-decay
    value it assigns, yielding a list of group dicts.
    """
    wd_cfg = cfg.WEIGHT_DECAY
    if isinstance(wd_cfg, numbers.Number):
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_cfg,
        }
    buckets = collections.defaultdict(list)  # weight_decay value -> params
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_cfg(name, param)].append(param)
    return [
        {"params": plist, "weight_decay": wd} for wd, plist in buckets.items()
    ]
def worker(world_size, args):
    """Run int8 post-train quantization (calibration) and evaluate the result.

    Loads a pretrained fp32 checkpoint, runs the calibration observers over
    the validation set to collect activation statistics, converts the model
    to a quantized one, evaluates it, and saves the quantized checkpoint.
    """
    # pylint: disable=too-many-statements
    rank = dist.get_rank()
    if world_size > 1:
        # Initialize distributed process group
        logger.info("init distributed process group {} / {}".format(rank, world_size))

    save_dir = os.path.join(args.save, args.arch + "." + "calibration")
    # exist_ok=True already tolerates a pre-existing directory, so no
    # separate os.path.exists() guard is needed.
    os.makedirs(save_dir, exist_ok=True)
    mge.set_log_file(os.path.join(save_dir, "log.txt"))

    model = models.__dict__[args.arch]()

    # load pretrained fp32 weights to calibrate
    assert args.checkpoint
    logger.info("Load pretrained weights from %s", args.checkpoint)
    ckpt = mge.load(args.checkpoint)
    ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
    model.load_state_dict(ckpt, strict=False)

    # Build valid datasets
    valid_dataset = data.dataset.ImageNet(args.data, train=False)
    valid_sampler = data.SequentialSampler(
        valid_dataset, batch_size=100, drop_last=False
    )
    valid_queue = data.DataLoader(
        valid_dataset,
        sampler=valid_sampler,
        transform=T.Compose(
            [T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), T.ToMode("CHW")]
        ),
        num_workers=args.workers,
    )

    # calibration: keep the classifier fp32, instrument the rest with observers
    model.fc.disable_quantize()
    model = quantize_qat(model, qconfig=Q.calibration_qconfig)

    def make_eval_func(calibrate=False):
        # Shared per-batch evaluation step (was duplicated as two identical
        # closures).  With calibrate=True the observers are enabled so
        # activation statistics are collected.  `model` is a free variable,
        # so after `model = quantize(model)` below the same closure factory
        # evaluates the quantized network.
        def eval_func(image, label):
            model.eval()
            if calibrate:
                enable_observer(model)
            logits = model(image)
            loss = F.loss.cross_entropy(logits, label, label_smooth=0.1)
            acc1, acc5 = F.topk_accuracy(logits, label, (1, 5))
            if dist.is_distributed():  # all_reduce_mean
                loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
                acc1 = dist.functional.all_reduce_sum(acc1) / dist.get_world_size()
                acc5 = dist.functional.all_reduce_sum(acc5) / dist.get_world_size()
            return loss, acc1, acc5

        return eval_func

    # calculate activation scales over the validation set
    infer(make_eval_func(calibrate=True), valid_queue, args)

    # convert to a truly quantized model
    model = quantize(model)

    # eval quantized model
    _, valid_acc, valid_acc5 = infer(make_eval_func(), valid_queue, args)
    logger.info("TEST %f, %f", valid_acc, valid_acc5)

    # save quantized model
    mge.save(
        {"step": -1, "state_dict": model.state_dict()},
        os.path.join(save_dir, "checkpoint-calibration.pkl"),
    )
    logger.info(
        "save in {}".format(os.path.join(save_dir, "checkpoint-calibration.pkl"))
    )
def infer(model, data_queue, args):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = mge.tensor(image, dtype="float32")
label = mge.tensor(label, dtype="int32")
loss, acc1, acc5 = model(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
if step % args.report_freq == 0 and | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()
    # Auto-detect GPU count unless the user pinned it with -n.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Multi-GPU goes through the distributed launcher; single GPU runs inline.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter group(s) from ``cfg.WEIGHT_DECAY``.

    If ``cfg.WEIGHT_DECAY`` is a plain number, a single group dict sharing
    that weight decay is returned.  Otherwise it is treated as a callable
    ``(name, param) -> wd`` and parameters are bucketed by the weight-decay
    value it assigns, yielding a list of group dicts.
    """
    wd_cfg = cfg.WEIGHT_DECAY
    if isinstance(wd_cfg, numbers.Number):
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_cfg,
        }
    buckets = collections.defaultdict(list)  # weight_decay value -> params
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_cfg(name, param)].append(param)
    return [
        {"params": plist, "weight_decay": wd} for wd, plist in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[ | T.Resize(256) | megengine.data.transform.Resize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()
    # Auto-detect GPU count unless the user pinned it with -n.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Multi-GPU goes through the distributed launcher; single GPU runs inline.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter group(s) from ``cfg.WEIGHT_DECAY``.

    If ``cfg.WEIGHT_DECAY`` is a plain number, a single group dict sharing
    that weight decay is returned.  Otherwise it is treated as a callable
    ``(name, param) -> wd`` and parameters are bucketed by the weight-decay
    value it assigns, yielding a list of group dicts.
    """
    wd_cfg = cfg.WEIGHT_DECAY
    if isinstance(wd_cfg, numbers.Number):
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_cfg,
        }
    buckets = collections.defaultdict(list)  # weight_decay value -> params
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_cfg(name, param)].append(param)
    return [
        {"params": plist, "weight_decay": wd} for wd, plist in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), | T.CenterCrop(224) | megengine.data.transform.CenterCrop |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()
    # Auto-detect GPU count unless the user pinned it with -n.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Multi-GPU goes through the distributed launcher; single GPU runs inline.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter group(s) from ``cfg.WEIGHT_DECAY``.

    If ``cfg.WEIGHT_DECAY`` is a plain number, a single group dict sharing
    that weight decay is returned.  Otherwise it is treated as a callable
    ``(name, param) -> wd`` and parameters are bucketed by the weight-decay
    value it assigns, yielding a list of group dicts.
    """
    wd_cfg = cfg.WEIGHT_DECAY
    if isinstance(wd_cfg, numbers.Number):
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_cfg,
        }
    buckets = collections.defaultdict(list)  # weight_decay value -> params
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_cfg(name, param)].append(param)
    return [
        {"params": plist, "weight_decay": wd} for wd, plist in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), | T.Normalize(mean=128) | megengine.data.transform.Normalize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Finetune a pretrained fp32 with int8 post train quantization(calibration)"""
import argparse
import collections
import numbers
import os
import time
# pylint: disable=import-error
import models
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.quantization as Q
from megengine.quantization.quantize import enable_observer, quantize, quantize_qat
logger = mge.get_logger(__name__)
def main():
    """Parse command-line options and launch the calibration worker."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument(
        "-c",
        "--checkpoint",
        default=None,
        type=str,
        help="pretrained model to finetune",
    )
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()
    # Auto-detect GPU count unless the user pinned it with -n.
    if args.ngpus is None:
        world_size = dist.helper.get_device_count_by_fork("gpu")
    else:
        world_size = args.ngpus
    if world_size == 0:
        world_size = 1
    if world_size != 1:
        logger.warning(
            "Calibration only supports single GPU now, %d provided", world_size
        )
    # Multi-GPU goes through the distributed launcher; single GPU runs inline.
    if world_size > 1:
        dist.launcher(worker)(world_size, args)
    else:
        worker(world_size, args)
def get_parameters(model, cfg):
    """Build optimizer parameter group(s) from ``cfg.WEIGHT_DECAY``.

    If ``cfg.WEIGHT_DECAY`` is a plain number, a single group dict sharing
    that weight decay is returned.  Otherwise it is treated as a callable
    ``(name, param) -> wd`` and parameters are bucketed by the weight-decay
    value it assigns, yielding a list of group dicts.
    """
    wd_cfg = cfg.WEIGHT_DECAY
    if isinstance(wd_cfg, numbers.Number):
        return {
            "params": model.parameters(requires_grad=True),
            "weight_decay": wd_cfg,
        }
    buckets = collections.defaultdict(list)  # weight_decay value -> params
    for name, param in model.named_parameters(requires_grad=True):
        buckets[wd_cfg(name, param)].append(param)
    return [
        {"params": plist, "weight_decay": wd} for wd, plist in buckets.items()
    ]
def worker(world_size, args):
# pylint: disable=too-many-statements
rank = dist.get_rank()
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
save_dir = os.path.join(args.save, args.arch + "." + "calibration")
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
mge.set_log_file(os.path.join(save_dir, "log.txt"))
model = models.__dict__[args.arch]()
# load calibration model
assert args.checkpoint
logger.info("Load pretrained weights from %s", args.checkpoint)
ckpt = mge.load(args.checkpoint)
ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
model.load_state_dict(ckpt, strict=False)
# Build valid datasets
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[T.Resize(256), T.CenterCrop(224), T.Normalize(mean=128), | T.ToMode("CHW") | megengine.data.transform.ToMode |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
def __init__(self):
super().__init__()
self.conv1 = | M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) | megengine.module.Conv2d |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
def __init__(self):
super().__init__()
self.conv1 = M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = | M.BatchNorm2d(64) | megengine.module.BatchNorm2d |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
def __init__(self):
super().__init__()
self.conv1 = M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = M.BatchNorm2d(64)
self.avgpool = | M.AvgPool2d(kernel_size=5, stride=5, padding=0) | megengine.module.AvgPool2d |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
def __init__(self):
super().__init__()
self.conv1 = M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = M.BatchNorm2d(64)
self.avgpool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
self.fc = | M.Linear(64, 10) | megengine.module.Linear |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
def __init__(self):
super().__init__()
self.conv1 = M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = M.BatchNorm2d(64)
self.avgpool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
self.fc = M.Linear(64, 10)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = | F.relu(x) | megengine.functional.relu |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
def __init__(self):
super().__init__()
self.conv1 = M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = M.BatchNorm2d(64)
self.avgpool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
self.fc = M.Linear(64, 10)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.avgpool(x)
x = | F.avg_pool2d(x, 22) | megengine.functional.avg_pool2d |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
def __init__(self):
super().__init__()
self.conv1 = M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = M.BatchNorm2d(64)
self.avgpool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
self.fc = M.Linear(64, 10)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.avgpool(x)
x = F.avg_pool2d(x, 22)
x = | F.flatten(x, 1) | megengine.functional.flatten |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
    """Tiny conv net used by the gradient-clipping tests.

    conv(7x7, stride 2) -> BN -> ReLU -> 5x5/5 avg-pool -> global avg-pool
    -> flatten -> linear classifier with 10 outputs.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = M.BatchNorm2d(64)
        self.avgpool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
        self.fc = M.Linear(64, 10)

    def forward(self, x):
        # NOTE(review): F.avg_pool2d(..., 22) assumes 224x224 inputs
        # (224 -> 112 -> 22 -> 1); other sizes would need a different window.
        feat = F.relu(self.bn1(self.conv1(x)))
        feat = F.avg_pool2d(self.avgpool(feat), 22)
        return self.fc(F.flatten(feat, 1))
def save_grad_value(net):
    """Snapshot each parameter's current gradient onto the parameter itself
    (as ``grad_backup``) so tests can compare against it after clipping."""
    for p in net.parameters():
        p.grad_backup = p.grad.numpy().copy()
def test_clip_grad_norm():
net = Net()
x = mge.tensor(np.random.randn(10, 3, 224, 224))
gm = | ad.GradManager() | megengine.autodiff.GradManager |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
    """Tiny conv net used by the gradient-clipping tests.

    conv(7x7, stride 2) -> BN -> ReLU -> 5x5/5 avg-pool -> global avg-pool
    -> flatten -> linear classifier with 10 outputs.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = M.BatchNorm2d(64)
        self.avgpool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
        self.fc = M.Linear(64, 10)

    def forward(self, x):
        # NOTE(review): F.avg_pool2d(..., 22) assumes 224x224 inputs
        # (224 -> 112 -> 22 -> 1); other sizes would need a different window.
        feat = F.relu(self.bn1(self.conv1(x)))
        feat = F.avg_pool2d(self.avgpool(feat), 22)
        return self.fc(F.flatten(feat, 1))
def save_grad_value(net):
    """Snapshot each parameter's current gradient onto the parameter itself
    (as ``grad_backup``) so tests can compare against it after clipping."""
    for p in net.parameters():
        p.grad_backup = p.grad.numpy().copy()
def test_clip_grad_norm():
    """clip_grad_norm should rescale every gradient by max_norm / total_norm."""
    net = Net()
    inp = mge.tensor(np.random.randn(10, 3, 224, 224))
    gm = ad.GradManager().attach(net.parameters())
    opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
    with gm:
        gm.backward(net(inp).sum())
        save_grad_value(net)
        max_norm = 1.0
        # NOTE(review): this comparison assumes the pre-clip L2 norm exceeds
        # max_norm (so clipping actually rescales) — confirm for this input
        # scale; otherwise grads would be left untouched.
        norm_before = optim.clip_grad_norm(net.parameters(), max_norm=max_norm, ord=2)
        factor = max_norm / norm_before
        for p in net.parameters():
            np.testing.assert_almost_equal(p.grad.numpy(), p.grad_backup * factor)
        opt.step().clear_grad()
def test_clip_grad_value():
net = Net()
x = np.random.randn(10, 3, 224, 224).astype("float32")
gm = | ad.GradManager() | megengine.autodiff.GradManager |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class Net(M.Module):
    """Tiny conv net used by the gradient-clipping tests.

    conv(7x7, stride 2) -> BN -> ReLU -> 5x5/5 avg-pool -> global avg-pool
    -> flatten -> linear classifier with 10 outputs.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = M.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = M.BatchNorm2d(64)
        self.avgpool = M.AvgPool2d(kernel_size=5, stride=5, padding=0)
        self.fc = M.Linear(64, 10)

    def forward(self, x):
        # NOTE(review): F.avg_pool2d(..., 22) assumes 224x224 inputs
        # (224 -> 112 -> 22 -> 1); other sizes would need a different window.
        feat = F.relu(self.bn1(self.conv1(x)))
        feat = F.avg_pool2d(self.avgpool(feat), 22)
        return self.fc(F.flatten(feat, 1))
def save_grad_value(net):
    """Snapshot each parameter's current gradient onto the parameter itself
    (as ``grad_backup``) so tests can compare against it after clipping."""
    for p in net.parameters():
        p.grad_backup = p.grad.numpy().copy()
def test_clip_grad_norm():
    """clip_grad_norm should rescale every gradient by max_norm / total_norm."""
    net = Net()
    inp = mge.tensor(np.random.randn(10, 3, 224, 224))
    gm = ad.GradManager().attach(net.parameters())
    opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
    with gm:
        gm.backward(net(inp).sum())
        save_grad_value(net)
        max_norm = 1.0
        # NOTE(review): this comparison assumes the pre-clip L2 norm exceeds
        # max_norm (so clipping actually rescales) — confirm for this input
        # scale; otherwise grads would be left untouched.
        norm_before = optim.clip_grad_norm(net.parameters(), max_norm=max_norm, ord=2)
        factor = max_norm / norm_before
        for p in net.parameters():
            np.testing.assert_almost_equal(p.grad.numpy(), p.grad_backup * factor)
        opt.step().clear_grad()
def test_clip_grad_value():
net = Net()
x = np.random.randn(10, 3, 224, 224).astype("float32")
gm = ad.GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), 1e-3, momentum=0.9)
with gm:
y = net( | mge.tensor(x) | megengine.tensor |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = | M.Conv2d(1, 6, 5) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = | M.MaxPool2d(2, 2) | megengine.module.MaxPool2d |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = | M.Conv2d(6, 16, 5) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = | M.MaxPool2d(2, 2) | megengine.module.MaxPool2d |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = | M.Linear(16 * 5 * 5, 120) | megengine.module.Linear |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = | M.Linear(120, 84) | megengine.module.Linear |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = | M.Linear(84, 10) | megengine.module.Linear |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = | F.flatten(x, 1) | megengine.functional.flatten |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = | M.Conv2d(1, 6, 5) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = | M.MaxPool2d(2, 2) | megengine.module.MaxPool2d |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = | M.Conv2d(6, 16, 5) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = | M.MaxPool2d(2, 2) | megengine.module.MaxPool2d |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = | M.Linear(16 * 53 * 53, 120) | megengine.module.Linear |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 53 * 53, 120)
self.relu3 = | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 53 * 53, 120)
self.relu3 = M.ReLU()
self.fc2 = | M.Linear(120, 84) | megengine.module.Linear |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 53 * 53, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = | M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
# Conv2d(1, 6, kernel_size=(5, 5))
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
# MaxPool2d(kernel_size=2, stride=2, padding=0)
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 5 * 5, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten reshape the tensor x with (N, C, H, W) into shape of (N, C*H*W)
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 5, 5)
x = F.flatten(x, 1)
# x.shape: (256, 400)
x = self.relu3(self.fc1(x))
x = self.relu4(self.fc2(x))
x = self.classifier(x)
return x
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 53 * 53, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = | M.Linear(84, 10) | megengine.module.Linear |
# -*- coding: utf-8 -*-
import megengine.module as M
import megengine.functional as F
class LeNet32x32(M.Module):
    """LeNet-5 style CNN for single-channel 32x32 inputs, 10 output classes.

    Two (Conv5x5 -> ReLU -> MaxPool2x2) feature stages followed by a
    three-layer fully-connected head.
    """

    def __init__(self):
        super().__init__()
        # feature extractor: 32x32 -> conv5 -> 28x28 -> pool -> 14x14
        #                          -> conv5 -> 10x10 -> pool -> 5x5
        self.conv1 = M.Conv2d(1, 6, 5)
        self.relu1 = M.ReLU()
        self.pool1 = M.MaxPool2d(2, 2)
        self.conv2 = M.Conv2d(6, 16, 5)
        self.relu2 = M.ReLU()
        self.pool2 = M.MaxPool2d(2, 2)
        # classifier head: 16*5*5 flattened features -> 120 -> 84 -> 10
        self.fc1 = M.Linear(16 * 5 * 5, 120)
        self.relu3 = M.ReLU()
        self.fc2 = M.Linear(120, 84)
        self.relu4 = M.ReLU()
        self.classifier = M.Linear(84, 10)

    def forward(self, x):
        feat = self.pool1(self.relu1(self.conv1(x)))
        feat = self.pool2(self.relu2(self.conv2(feat)))
        # collapse (N, C, H, W) into (N, C*H*W) before the FC layers
        flat = F.flatten(feat, 1)
        hidden = self.relu3(self.fc1(flat))
        hidden = self.relu4(self.fc2(hidden))
        return self.classifier(hidden)
class LeNet224x224(M.Module):
def __init__(self):
super().__init__()
# single channel image, two 5x5 Conv + ReLU + Pool
self.conv1 = M.Conv2d(1, 6, 5)
self.relu1 = M.ReLU()
self.pool1 = M.MaxPool2d(2, 2)
self.conv2 = M.Conv2d(6, 16, 5)
self.relu2 = M.ReLU()
self.pool2 = M.MaxPool2d(2, 2)
# two FC + ReLU
self.fc1 = M.Linear(16 * 53 * 53, 120)
self.relu3 = M.ReLU()
self.fc2 = M.Linear(120, 84)
self.relu4 = M.ReLU()
# classifier
self.classifier = M.Linear(84, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
# F.flatten(x, 1) reshape the tensor x with (N, C, H, W) along
# the 1st dimension into shape of (N, C*H*W),
# i.e., x = x.reshape(x.shape[0], -1)
# x.shape: (256, 16, 53, 53)
x = | F.flatten(x, 1) | megengine.functional.flatten |
import megengine as mge
import megengine.functional as F
from megengine.core import Tensor
def softmax_loss(pred, label, ignore_label=-1):
    """Softmax cross-entropy loss with an ignorable label value.

    :param pred: class scores (logits); assumed shape (N, C) with axis 1
        as the class axis — TODO confirm against callers.
    :param label: integer class indices; entries equal to ``ignore_label``
        contribute zero loss.
    :param ignore_label: label value excluded from the loss (default -1).
    :return: per-sample loss tensor (no reduction applied).

    NOTE(review): ``pred -= max_pred`` mutates the caller's tensor in
    place — confirm callers do not reuse ``pred`` afterwards.
    """
    # subtract the per-row max for numerical stability; zero_grad stops
    # gradients from flowing through the max itself
    max_pred = F.zero_grad(pred.max(axis=1, keepdims=True))
    pred -= max_pred
    # log-softmax: pred - log(sum(exp(pred)))
    log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
    # mask is 0 where label == ignore_label, 1 elsewhere
    mask = 1 - F.equal(label, ignore_label)
    # replace ignored labels with 0 so the one-hot indexing stays in range
    vlabel = label * mask
    loss = -(F.indexing_one_hot(log_prob, vlabel, 1) * mask)
    return loss
def smooth_l1_loss(pred, target, beta: float):
abs_x = | F.abs(pred - target) | megengine.functional.abs |
import megengine as mge
import megengine.functional as F
from megengine.core import Tensor
def softmax_loss(pred, label, ignore_label=-1):
max_pred = F.zero_grad(pred.max(axis=1, keepdims=True))
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - | F.equal(label, ignore_label) | megengine.functional.equal |
import megengine as mge
import megengine.functional as F
from megengine.core import Tensor
def softmax_loss(pred, label, ignore_label=-1):
max_pred = F.zero_grad(pred.max(axis=1, keepdims=True))
pred -= max_pred
log_prob = pred - F.log(F.exp(pred).sum(axis=1, keepdims=True))
mask = 1 - F.equal(label, ignore_label)
vlabel = label * mask
loss = -( | F.indexing_one_hot(log_prob, vlabel, 1) | megengine.functional.indexing_one_hot |
import megengine as mge
import megengine.functional as F
from megengine.core import Tensor
def softmax_loss(pred, label, ignore_label=-1):
max_pred = F.zero_grad(pred.max(axis=1, keepdims=True))
pred -= max_pred
log_prob = pred - F.log( | F.exp(pred) | megengine.functional.exp |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_TQT():
f = | TQT_Function(-127, 127) | megengine.quantization.fake_quant.TQT_Function |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_TQT():
f = TQT_Function(-127, 127)
nf = numpy_TQT_Function(-127, 127)
def check_inp(a, b, c, a_np, b_np, c_np):
np.testing.assert_allclose(
f.forward(a, b).numpy(),
nf.forward(a_np, b_np).astype("float32"),
rtol=1e-6,
atol=1e-6,
)
c1, c2 = f.backward(c)
c1_np, c2_np = nf.backward(c_np)
np.testing.assert_allclose(c1.numpy(), c1_np.astype("float32"), rtol=1e-6)
np.testing.assert_allclose(c2.numpy(), c2_np.astype("float32"), rtol=5e-5)
a_np = np.random.random((4, 3)).astype("float32")
b_np = np.random.random((1)).astype("float32")
a = | tensor(a_np) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_TQT():
f = TQT_Function(-127, 127)
nf = numpy_TQT_Function(-127, 127)
def check_inp(a, b, c, a_np, b_np, c_np):
np.testing.assert_allclose(
f.forward(a, b).numpy(),
nf.forward(a_np, b_np).astype("float32"),
rtol=1e-6,
atol=1e-6,
)
c1, c2 = f.backward(c)
c1_np, c2_np = nf.backward(c_np)
np.testing.assert_allclose(c1.numpy(), c1_np.astype("float32"), rtol=1e-6)
np.testing.assert_allclose(c2.numpy(), c2_np.astype("float32"), rtol=5e-5)
a_np = np.random.random((4, 3)).astype("float32")
b_np = np.random.random((1)).astype("float32")
a = tensor(a_np)
b = | tensor(b_np) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """NumPy reference implementation of TQT fake quantization.

    forward:  y = round(clip(x / 2**s, lowerbound, upperbound)) * 2**s
    backward: straight-through gradient for in-range elements, plus the
              TQT gradient with respect to the log2-scale ``s``.
    """

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        # quantization step t = 2**scale (scale is the log2 of the step)
        t = 2 ** scale
        inp_scaled = inp / t
        inp_clipped = np.clip(inp_scaled, self.lowerbound, self.upperbound)
        inp_rounded = np.round(inp_clipped)
        # stash the intermediates needed by backward()
        self.saved_tensors = (inp_scaled, inp_rounded, t)
        return inp_rounded * t

    def backward(self, grad_inp_flq):
        inp_scaled, inp_rounded, t = self.saved_tensors
        # elements that fell outside the clipping range by more than half a step
        mask_clip = (inp_scaled < self.lowerbound - 0.5) | (
            inp_scaled > self.upperbound + 0.5
        )
        # complementary mask: elements that were quantized rather than clipped
        mask_quant = ~mask_clip
        # gradient contribution from quantized elements (|scaled| <= L)
        grad_quant = grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
        # gradient contribution from clipped elements (|scaled| > L)
        grad_clip = grad_inp_flq * mask_clip * inp_rounded
        # dL/ds = dL/dt * t * ln(2), since t = 2**s
        grad_s = (grad_clip.sum() + grad_quant.sum()) * t * np.log(2)
        # straight-through input gradient, zeroed where the input was clipped
        grad_inp = grad_inp_flq * mask_quant
        return grad_inp, grad_s
def test_TQT():
    """Check TQT_Function against the NumPy reference on random inputs.

    Compares the forward output and both backward gradients (w.r.t. the
    input and w.r.t. the log2-scale).
    """
    f = TQT_Function(-127, 127)
    nf = numpy_TQT_Function(-127, 127)
    def check_inp(a, b, c, a_np, b_np, c_np):
        # forward results must agree to float32 precision
        np.testing.assert_allclose(
            f.forward(a, b).numpy(),
            nf.forward(a_np, b_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # c / c_np play the role of the incoming output gradient
        c1, c2 = f.backward(c)
        c1_np, c2_np = nf.backward(c_np)
        np.testing.assert_allclose(c1.numpy(), c1_np.astype("float32"), rtol=1e-6)
        # the scale gradient accumulates many terms: looser tolerance
        np.testing.assert_allclose(c2.numpy(), c2_np.astype("float32"), rtol=5e-5)
    a_np = np.random.random((4, 3)).astype("float32")
    b_np = np.random.random((1)).astype("float32")
    a = tensor(a_np)
    b = tensor(b_np)
    check_inp(a, b, b, a_np, b_np, b_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding with a straight-through gradient.

    Forward rounds to the nearest integer; backward passes the incoming
    gradient through unchanged (round() has zero derivative almost
    everywhere, which would otherwise kill all gradients).
    """
    def forward(self, x):
        return F.round(x)
    def backward(self, output_grads):
        # straight-through estimator: identity gradient
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Reference fake quantization: quantize, clamp to [qmin, qmax], dequantize."""
    # quantize with straight-through rounding, then shift by the zero point
    quantized = Round()(inp / scale) + zero_point
    # clamp into the representable integer range
    clamped = F.maximum(quantized, qmin)
    clamped = F.minimum(clamped, qmax)
    # dequantize back to the real-valued domain
    return (clamped - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qmin, qmax, q_dict)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
zero_point = | tensor([1.0], dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest

import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """Pure-NumPy reference implementation of the TQT fake-quant op."""

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        # integer range representable after rounding
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        """Scale by ``2 ** scale``, clip, round, and rescale back."""
        step = 2 ** scale
        scaled = inp / step
        clipped = np.minimum(scaled, self.upperbound)
        clipped = np.maximum(clipped, self.lowerbound)
        rounded = np.round(clipped)
        # keep intermediates for the backward pass
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step

    def backward(self, grad_inp_flq):
        """Return ``(grad_inp, grad_s)`` for the saved forward pass."""
        scaled, rounded, step = self.saved_tensors
        # elements saturated beyond the representable range (0.5 slack)
        clipped_mask = (scaled < self.lowerbound - 0.5) + (
            scaled > self.upperbound + 0.5
        )
        # complementary 0/1 mask selecting the in-range elements
        quant_mask = np.abs(clipped_mask - 1)
        # in-range elements: the rounding error drives the scale gradient
        grad_quant = (grad_inp_flq * quant_mask * (rounded - scaled)).sum()
        # clipped elements: contribute their saturated integer value
        grad_clip = (grad_inp_flq * clipped_mask * rounded).sum()
        # dL/ds = dL/dt * t * ln(2)
        grad_s = (grad_clip + grad_quant) * step * np.log(2)
        # input gradient passes through for in-range elements only
        grad_inp = grad_inp_flq * quant_mask
        return grad_inp, grad_s
def test_TQT():
    """Check TQT_Function against its pure-NumPy reference on random data."""
    f = TQT_Function(-127, 127)
    nf = numpy_TQT_Function(-127, 127)

    def check_inp(a, b, c, a_np, b_np, c_np):
        # forward results must agree elementwise
        np.testing.assert_allclose(
            f.forward(a, b).numpy(),
            nf.forward(a_np, b_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # gradients w.r.t. the input and the scale must agree as well
        grad_inp, grad_s = f.backward(c)
        grad_inp_np, grad_s_np = nf.backward(c_np)
        np.testing.assert_allclose(
            grad_inp.numpy(), grad_inp_np.astype("float32"), rtol=1e-6
        )
        np.testing.assert_allclose(
            grad_s.numpy(), grad_s_np.astype("float32"), rtol=5e-5
        )

    a_np = np.random.random((4, 3)).astype("float32")
    b_np = np.random.random(1).astype("float32")
    a, b = tensor(a_np), tensor(b_np)
    check_inp(a, b, b, a_np, b_np, b_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding op with a straight-through gradient.

    ``forward`` rounds elementwise; ``backward`` passes gradients through
    unchanged, which makes the (non-differentiable) rounding usable inside
    a fake-quantization graph.
    """

    def forward(self, x):
        return F.round(x)

    def backward(self, output_grads):
        # straight-through estimator: identity gradient
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization assembled from primitive ops.

    Quantizes *inp* with the affine parameters ``scale``/``zero_point``,
    clamps the integer value to ``[qmin, qmax]``, then dequantizes back.
    """
    quantized = Round()(inp / scale) + zero_point
    clamped = F.minimum(F.maximum(quantized, qmin), qmax)
    return (clamped - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qmin, qmax, q_dict)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
zero_point = tensor([1.0], dtype=np.float32)
scale = | tensor([4.0], dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """Pure-NumPy reference implementation of the TQT fake-quant op."""

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        # integer range representable after rounding
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        """Scale by ``2 ** scale``, clip, round, and rescale back."""
        step = 2 ** scale
        scaled = inp / step
        clipped = np.minimum(scaled, self.upperbound)
        clipped = np.maximum(clipped, self.lowerbound)
        rounded = np.round(clipped)
        # keep intermediates for the backward pass
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step

    def backward(self, grad_inp_flq):
        """Return ``(grad_inp, grad_s)`` for the saved forward pass."""
        scaled, rounded, step = self.saved_tensors
        # elements saturated beyond the representable range (0.5 slack)
        clipped_mask = (scaled < self.lowerbound - 0.5) + (
            scaled > self.upperbound + 0.5
        )
        # complementary 0/1 mask selecting the in-range elements
        quant_mask = np.abs(clipped_mask - 1)
        # in-range elements: the rounding error drives the scale gradient
        grad_quant = (grad_inp_flq * quant_mask * (rounded - scaled)).sum()
        # clipped elements: contribute their saturated integer value
        grad_clip = (grad_inp_flq * clipped_mask * rounded).sum()
        # dL/ds = dL/dt * t * ln(2)
        grad_s = (grad_clip + grad_quant) * step * np.log(2)
        # input gradient passes through for in-range elements only
        grad_inp = grad_inp_flq * quant_mask
        return grad_inp, grad_s
def test_TQT():
    """Check TQT_Function against its pure-NumPy reference on random data."""
    f = TQT_Function(-127, 127)
    nf = numpy_TQT_Function(-127, 127)

    def check_inp(a, b, c, a_np, b_np, c_np):
        # forward results must agree elementwise
        np.testing.assert_allclose(
            f.forward(a, b).numpy(),
            nf.forward(a_np, b_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # gradients w.r.t. the input and the scale must agree as well
        grad_inp, grad_s = f.backward(c)
        grad_inp_np, grad_s_np = nf.backward(c_np)
        np.testing.assert_allclose(
            grad_inp.numpy(), grad_inp_np.astype("float32"), rtol=1e-6
        )
        np.testing.assert_allclose(
            grad_s.numpy(), grad_s_np.astype("float32"), rtol=5e-5
        )

    a_np = np.random.random((4, 3)).astype("float32")
    b_np = np.random.random(1).astype("float32")
    a, b = tensor(a_np), tensor(b_np)
    check_inp(a, b, b, a_np, b_np, b_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding op with a straight-through gradient.

    ``forward`` rounds elementwise; ``backward`` passes gradients through
    unchanged, which makes the (non-differentiable) rounding usable inside
    a fake-quantization graph.
    """

    def forward(self, x):
        return F.round(x)

    def backward(self, output_grads):
        # straight-through estimator: identity gradient
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization assembled from primitive ops.

    Quantizes *inp* with the affine parameters ``scale``/``zero_point``,
    clamps the integer value to ``[qmin, qmax]``, then dequantizes back.
    """
    quantized = Round()(inp / scale) + zero_point
    clamped = F.minimum(F.maximum(quantized, qmin), qmax)
    return (clamped - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = | tensor(inp_data, dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """Pure-NumPy reference implementation of the TQT fake-quant op."""

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        # integer range representable after rounding
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        """Scale by ``2 ** scale``, clip, round, and rescale back."""
        step = 2 ** scale
        scaled = inp / step
        clipped = np.minimum(scaled, self.upperbound)
        clipped = np.maximum(clipped, self.lowerbound)
        rounded = np.round(clipped)
        # keep intermediates for the backward pass
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step

    def backward(self, grad_inp_flq):
        """Return ``(grad_inp, grad_s)`` for the saved forward pass."""
        scaled, rounded, step = self.saved_tensors
        # elements saturated beyond the representable range (0.5 slack)
        clipped_mask = (scaled < self.lowerbound - 0.5) + (
            scaled > self.upperbound + 0.5
        )
        # complementary 0/1 mask selecting the in-range elements
        quant_mask = np.abs(clipped_mask - 1)
        # in-range elements: the rounding error drives the scale gradient
        grad_quant = (grad_inp_flq * quant_mask * (rounded - scaled)).sum()
        # clipped elements: contribute their saturated integer value
        grad_clip = (grad_inp_flq * clipped_mask * rounded).sum()
        # dL/ds = dL/dt * t * ln(2)
        grad_s = (grad_clip + grad_quant) * step * np.log(2)
        # input gradient passes through for in-range elements only
        grad_inp = grad_inp_flq * quant_mask
        return grad_inp, grad_s
def test_TQT():
    """Check TQT_Function against its pure-NumPy reference on random data."""
    f = TQT_Function(-127, 127)
    nf = numpy_TQT_Function(-127, 127)

    def check_inp(a, b, c, a_np, b_np, c_np):
        # forward results must agree elementwise
        np.testing.assert_allclose(
            f.forward(a, b).numpy(),
            nf.forward(a_np, b_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # gradients w.r.t. the input and the scale must agree as well
        grad_inp, grad_s = f.backward(c)
        grad_inp_np, grad_s_np = nf.backward(c_np)
        np.testing.assert_allclose(
            grad_inp.numpy(), grad_inp_np.astype("float32"), rtol=1e-6
        )
        np.testing.assert_allclose(
            grad_s.numpy(), grad_s_np.astype("float32"), rtol=5e-5
        )

    a_np = np.random.random((4, 3)).astype("float32")
    b_np = np.random.random(1).astype("float32")
    a, b = tensor(a_np), tensor(b_np)
    check_inp(a, b, b, a_np, b_np, b_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding op with a straight-through gradient.

    ``forward`` rounds elementwise; ``backward`` passes gradients through
    unchanged, which makes the (non-differentiable) rounding usable inside
    a fake-quantization graph.
    """

    def forward(self, x):
        return F.round(x)

    def backward(self, output_grads):
        # straight-through estimator: identity gradient
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization assembled from primitive ops.

    Quantizes *inp* with the affine parameters ``scale``/``zero_point``,
    clamps the integer value to ``[qmin, qmax]``, then dequantizes back.
    """
    quantized = Round()(inp / scale) + zero_point
    clamped = F.minimum(F.maximum(quantized, qmin), qmax)
    return (clamped - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = | tensor(inp_data, dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """Pure-NumPy reference implementation of the TQT fake-quant op."""

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        # integer range representable after rounding
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        """Scale by ``2 ** scale``, clip, round, and rescale back."""
        step = 2 ** scale
        scaled = inp / step
        clipped = np.minimum(scaled, self.upperbound)
        clipped = np.maximum(clipped, self.lowerbound)
        rounded = np.round(clipped)
        # keep intermediates for the backward pass
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step

    def backward(self, grad_inp_flq):
        """Return ``(grad_inp, grad_s)`` for the saved forward pass."""
        scaled, rounded, step = self.saved_tensors
        # elements saturated beyond the representable range (0.5 slack)
        clipped_mask = (scaled < self.lowerbound - 0.5) + (
            scaled > self.upperbound + 0.5
        )
        # complementary 0/1 mask selecting the in-range elements
        quant_mask = np.abs(clipped_mask - 1)
        # in-range elements: the rounding error drives the scale gradient
        grad_quant = (grad_inp_flq * quant_mask * (rounded - scaled)).sum()
        # clipped elements: contribute their saturated integer value
        grad_clip = (grad_inp_flq * clipped_mask * rounded).sum()
        # dL/ds = dL/dt * t * ln(2)
        grad_s = (grad_clip + grad_quant) * step * np.log(2)
        # input gradient passes through for in-range elements only
        grad_inp = grad_inp_flq * quant_mask
        return grad_inp, grad_s
def test_TQT():
    """Check TQT_Function against its pure-NumPy reference on random data."""
    f = TQT_Function(-127, 127)
    nf = numpy_TQT_Function(-127, 127)

    def check_inp(a, b, c, a_np, b_np, c_np):
        # forward results must agree elementwise
        np.testing.assert_allclose(
            f.forward(a, b).numpy(),
            nf.forward(a_np, b_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # gradients w.r.t. the input and the scale must agree as well
        grad_inp, grad_s = f.backward(c)
        grad_inp_np, grad_s_np = nf.backward(c_np)
        np.testing.assert_allclose(
            grad_inp.numpy(), grad_inp_np.astype("float32"), rtol=1e-6
        )
        np.testing.assert_allclose(
            grad_s.numpy(), grad_s_np.astype("float32"), rtol=5e-5
        )

    a_np = np.random.random((4, 3)).astype("float32")
    b_np = np.random.random(1).astype("float32")
    a, b = tensor(a_np), tensor(b_np)
    check_inp(a, b, b, a_np, b_np, b_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding op with a straight-through gradient.

    ``forward`` rounds elementwise; ``backward`` passes gradients through
    unchanged, which makes the (non-differentiable) rounding usable inside
    a fake-quantization graph.
    """

    def forward(self, x):
        return F.round(x)

    def backward(self, output_grads):
        # straight-through estimator: identity gradient
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization assembled from primitive ops.

    Quantizes *inp* with the affine parameters ``scale``/``zero_point``,
    clamps the integer value to ``[qmin, qmax]``, then dequantizes back.
    """
    quantized = Round()(inp / scale) + zero_point
    clamped = F.minimum(F.maximum(quantized, qmin), qmax)
    return (clamped - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x, callback=_save_to(x))
y = | fake_quant_tensor(x, qmin, qmax, q_dict) | megengine.quantization.utils.fake_quant_tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """Pure-NumPy reference implementation of the TQT fake-quant op."""

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        # integer range representable after rounding
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        """Scale by ``2 ** scale``, clip, round, and rescale back."""
        step = 2 ** scale
        scaled = inp / step
        clipped = np.minimum(scaled, self.upperbound)
        clipped = np.maximum(clipped, self.lowerbound)
        rounded = np.round(clipped)
        # keep intermediates for the backward pass
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step

    def backward(self, grad_inp_flq):
        """Return ``(grad_inp, grad_s)`` for the saved forward pass."""
        scaled, rounded, step = self.saved_tensors
        # elements saturated beyond the representable range (0.5 slack)
        clipped_mask = (scaled < self.lowerbound - 0.5) + (
            scaled > self.upperbound + 0.5
        )
        # complementary 0/1 mask selecting the in-range elements
        quant_mask = np.abs(clipped_mask - 1)
        # in-range elements: the rounding error drives the scale gradient
        grad_quant = (grad_inp_flq * quant_mask * (rounded - scaled)).sum()
        # clipped elements: contribute their saturated integer value
        grad_clip = (grad_inp_flq * clipped_mask * rounded).sum()
        # dL/ds = dL/dt * t * ln(2)
        grad_s = (grad_clip + grad_quant) * step * np.log(2)
        # input gradient passes through for in-range elements only
        grad_inp = grad_inp_flq * quant_mask
        return grad_inp, grad_s
def test_TQT():
    """Check TQT_Function against its pure-NumPy reference on random data."""
    f = TQT_Function(-127, 127)
    nf = numpy_TQT_Function(-127, 127)

    def check_inp(a, b, c, a_np, b_np, c_np):
        # forward results must agree elementwise
        np.testing.assert_allclose(
            f.forward(a, b).numpy(),
            nf.forward(a_np, b_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # gradients w.r.t. the input and the scale must agree as well
        grad_inp, grad_s = f.backward(c)
        grad_inp_np, grad_s_np = nf.backward(c_np)
        np.testing.assert_allclose(
            grad_inp.numpy(), grad_inp_np.astype("float32"), rtol=1e-6
        )
        np.testing.assert_allclose(
            grad_s.numpy(), grad_s_np.astype("float32"), rtol=5e-5
        )

    a_np = np.random.random((4, 3)).astype("float32")
    b_np = np.random.random(1).astype("float32")
    a, b = tensor(a_np), tensor(b_np)
    check_inp(a, b, b, a_np, b_np, b_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding op with a straight-through gradient.

    ``forward`` rounds elementwise; ``backward`` passes gradients through
    unchanged, which makes the (non-differentiable) rounding usable inside
    a fake-quantization graph.
    """

    def forward(self, x):
        return F.round(x)

    def backward(self, output_grads):
        # straight-through estimator: identity gradient
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization assembled from primitive ops.

    Quantizes *inp* with the affine parameters ``scale``/``zero_point``,
    clamps the integer value to ``[qmin, qmax]``, then dequantizes back.
    """
    quantized = Round()(inp / scale) + zero_point
    clamped = F.minimum(F.maximum(quantized, qmin), qmax)
    return (clamped - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qmin, qmax, q_dict)
grad(y, tensor(F.ones_like(x)))
x1 = | tensor(inp_data, dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """Pure-NumPy reference implementation of the TQT fake-quant op."""

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        # integer range representable after rounding
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        """Scale by ``2 ** scale``, clip, round, and rescale back."""
        step = 2 ** scale
        scaled = inp / step
        clipped = np.minimum(scaled, self.upperbound)
        clipped = np.maximum(clipped, self.lowerbound)
        rounded = np.round(clipped)
        # keep intermediates for the backward pass
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step

    def backward(self, grad_inp_flq):
        """Return ``(grad_inp, grad_s)`` for the saved forward pass."""
        scaled, rounded, step = self.saved_tensors
        # elements saturated beyond the representable range (0.5 slack)
        clipped_mask = (scaled < self.lowerbound - 0.5) + (
            scaled > self.upperbound + 0.5
        )
        # complementary 0/1 mask selecting the in-range elements
        quant_mask = np.abs(clipped_mask - 1)
        # in-range elements: the rounding error drives the scale gradient
        grad_quant = (grad_inp_flq * quant_mask * (rounded - scaled)).sum()
        # clipped elements: contribute their saturated integer value
        grad_clip = (grad_inp_flq * clipped_mask * rounded).sum()
        # dL/ds = dL/dt * t * ln(2)
        grad_s = (grad_clip + grad_quant) * step * np.log(2)
        # input gradient passes through for in-range elements only
        grad_inp = grad_inp_flq * quant_mask
        return grad_inp, grad_s
def test_TQT():
    """Check TQT_Function against its pure-NumPy reference on random data."""
    f = TQT_Function(-127, 127)
    nf = numpy_TQT_Function(-127, 127)

    def check_inp(a, b, c, a_np, b_np, c_np):
        # forward results must agree elementwise
        np.testing.assert_allclose(
            f.forward(a, b).numpy(),
            nf.forward(a_np, b_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # gradients w.r.t. the input and the scale must agree as well
        grad_inp, grad_s = f.backward(c)
        grad_inp_np, grad_s_np = nf.backward(c_np)
        np.testing.assert_allclose(
            grad_inp.numpy(), grad_inp_np.astype("float32"), rtol=1e-6
        )
        np.testing.assert_allclose(
            grad_s.numpy(), grad_s_np.astype("float32"), rtol=5e-5
        )

    a_np = np.random.random((4, 3)).astype("float32")
    b_np = np.random.random(1).astype("float32")
    a, b = tensor(a_np), tensor(b_np)
    check_inp(a, b, b, a_np, b_np, b_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding op with a straight-through gradient.

    ``forward`` rounds elementwise; ``backward`` passes gradients through
    unchanged, which makes the (non-differentiable) rounding usable inside
    a fake-quantization graph.
    """

    def forward(self, x):
        return F.round(x)

    def backward(self, output_grads):
        # straight-through estimator: identity gradient
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization assembled from primitive ops.

    Quantizes *inp* with the affine parameters ``scale``/``zero_point``,
    clamps the integer value to ``[qmin, qmax]``, then dequantizes back.
    """
    quantized = Round()(inp / scale) + zero_point
    clamped = F.minimum(F.maximum(quantized, qmin), qmax)
    return (clamped - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qmin, qmax, q_dict)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert | make_shape_tuple(x.grad.shape) | megengine.core.tensor.utils.make_shape_tuple |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """Pure-NumPy reference implementation of the TQT fake-quant op."""

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        # integer range representable after rounding
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        """Scale by ``2 ** scale``, clip, round, and rescale back."""
        step = 2 ** scale
        scaled = inp / step
        clipped = np.minimum(scaled, self.upperbound)
        clipped = np.maximum(clipped, self.lowerbound)
        rounded = np.round(clipped)
        # keep intermediates for the backward pass
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step

    def backward(self, grad_inp_flq):
        """Return ``(grad_inp, grad_s)`` for the saved forward pass."""
        scaled, rounded, step = self.saved_tensors
        # elements saturated beyond the representable range (0.5 slack)
        clipped_mask = (scaled < self.lowerbound - 0.5) + (
            scaled > self.upperbound + 0.5
        )
        # complementary 0/1 mask selecting the in-range elements
        quant_mask = np.abs(clipped_mask - 1)
        # in-range elements: the rounding error drives the scale gradient
        grad_quant = (grad_inp_flq * quant_mask * (rounded - scaled)).sum()
        # clipped elements: contribute their saturated integer value
        grad_clip = (grad_inp_flq * clipped_mask * rounded).sum()
        # dL/ds = dL/dt * t * ln(2)
        grad_s = (grad_clip + grad_quant) * step * np.log(2)
        # input gradient passes through for in-range elements only
        grad_inp = grad_inp_flq * quant_mask
        return grad_inp, grad_s
def test_TQT():
    """Check TQT_Function against its pure-NumPy reference on random data."""
    f = TQT_Function(-127, 127)
    nf = numpy_TQT_Function(-127, 127)

    def check_inp(a, b, c, a_np, b_np, c_np):
        # forward results must agree elementwise
        np.testing.assert_allclose(
            f.forward(a, b).numpy(),
            nf.forward(a_np, b_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # gradients w.r.t. the input and the scale must agree as well
        grad_inp, grad_s = f.backward(c)
        grad_inp_np, grad_s_np = nf.backward(c_np)
        np.testing.assert_allclose(
            grad_inp.numpy(), grad_inp_np.astype("float32"), rtol=1e-6
        )
        np.testing.assert_allclose(
            grad_s.numpy(), grad_s_np.astype("float32"), rtol=5e-5
        )

    a_np = np.random.random((4, 3)).astype("float32")
    b_np = np.random.random(1).astype("float32")
    a, b = tensor(a_np), tensor(b_np)
    check_inp(a, b, b, a_np, b_np, b_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding op with a straight-through gradient.

    ``forward`` rounds elementwise; ``backward`` passes gradients through
    unchanged, which makes the (non-differentiable) rounding usable inside
    a fake-quantization graph.
    """

    def forward(self, x):
        return F.round(x)

    def backward(self, output_grads):
        # straight-through estimator: identity gradient
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization assembled from primitive ops.

    Quantizes *inp* with the affine parameters ``scale``/``zero_point``,
    clamps the integer value to ``[qmin, qmax]``, then dequantizes back.
    """
    quantized = Round()(inp / scale) + zero_point
    clamped = F.minimum(F.maximum(quantized, qmin), qmax)
    return (clamped - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qmin, qmax, q_dict)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == | make_shape_tuple(x1.grad.shape) | megengine.core.tensor.utils.make_shape_tuple |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """Pure-NumPy reference implementation of the TQT fake-quant op."""

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        # integer range representable after rounding
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        """Scale by ``2 ** scale``, clip, round, and rescale back."""
        step = 2 ** scale
        scaled = inp / step
        clipped = np.minimum(scaled, self.upperbound)
        clipped = np.maximum(clipped, self.lowerbound)
        rounded = np.round(clipped)
        # keep intermediates for the backward pass
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step

    def backward(self, grad_inp_flq):
        """Return ``(grad_inp, grad_s)`` for the saved forward pass."""
        scaled, rounded, step = self.saved_tensors
        # elements saturated beyond the representable range (0.5 slack)
        clipped_mask = (scaled < self.lowerbound - 0.5) + (
            scaled > self.upperbound + 0.5
        )
        # complementary 0/1 mask selecting the in-range elements
        quant_mask = np.abs(clipped_mask - 1)
        # in-range elements: the rounding error drives the scale gradient
        grad_quant = (grad_inp_flq * quant_mask * (rounded - scaled)).sum()
        # clipped elements: contribute their saturated integer value
        grad_clip = (grad_inp_flq * clipped_mask * rounded).sum()
        # dL/ds = dL/dt * t * ln(2)
        grad_s = (grad_clip + grad_quant) * step * np.log(2)
        # input gradient passes through for in-range elements only
        grad_inp = grad_inp_flq * quant_mask
        return grad_inp, grad_s
def test_TQT():
    """Check TQT_Function against its pure-NumPy reference on random data."""
    f = TQT_Function(-127, 127)
    nf = numpy_TQT_Function(-127, 127)

    def check_inp(a, b, c, a_np, b_np, c_np):
        # forward results must agree elementwise
        np.testing.assert_allclose(
            f.forward(a, b).numpy(),
            nf.forward(a_np, b_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # gradients w.r.t. the input and the scale must agree as well
        grad_inp, grad_s = f.backward(c)
        grad_inp_np, grad_s_np = nf.backward(c_np)
        np.testing.assert_allclose(
            grad_inp.numpy(), grad_inp_np.astype("float32"), rtol=1e-6
        )
        np.testing.assert_allclose(
            grad_s.numpy(), grad_s_np.astype("float32"), rtol=5e-5
        )

    a_np = np.random.random((4, 3)).astype("float32")
    b_np = np.random.random(1).astype("float32")
    a, b = tensor(a_np), tensor(b_np)
    check_inp(a, b, b, a_np, b_np, b_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding op with a straight-through gradient.

    ``forward`` rounds elementwise; ``backward`` passes gradients through
    unchanged, which makes the (non-differentiable) rounding usable inside
    a fake-quantization graph.
    """

    def forward(self, x):
        return F.round(x)

    def backward(self, output_grads):
        # straight-through estimator: identity gradient
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization assembled from primitive ops.

    Quantizes *inp* with the affine parameters ``scale``/``zero_point``,
    clamps the integer value to ``[qmin, qmax]``, then dequantizes back.
    """
    quantized = Round()(inp / scale) + zero_point
    clamped = F.minimum(F.maximum(quantized, qmin), qmax)
    return (clamped - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = | fake_quant_tensor(inp, qmin, qmax, q_dict) | megengine.quantization.utils.fake_quant_tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """Pure-NumPy reference implementation of the TQT fake-quant op."""

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        # integer range representable after rounding
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        """Scale by ``2 ** scale``, clip, round, and rescale back."""
        step = 2 ** scale
        scaled = inp / step
        clipped = np.minimum(scaled, self.upperbound)
        clipped = np.maximum(clipped, self.lowerbound)
        rounded = np.round(clipped)
        # keep intermediates for the backward pass
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step

    def backward(self, grad_inp_flq):
        """Return ``(grad_inp, grad_s)`` for the saved forward pass."""
        scaled, rounded, step = self.saved_tensors
        # elements saturated beyond the representable range (0.5 slack)
        clipped_mask = (scaled < self.lowerbound - 0.5) + (
            scaled > self.upperbound + 0.5
        )
        # complementary 0/1 mask selecting the in-range elements
        quant_mask = np.abs(clipped_mask - 1)
        # in-range elements: the rounding error drives the scale gradient
        grad_quant = (grad_inp_flq * quant_mask * (rounded - scaled)).sum()
        # clipped elements: contribute their saturated integer value
        grad_clip = (grad_inp_flq * clipped_mask * rounded).sum()
        # dL/ds = dL/dt * t * ln(2)
        grad_s = (grad_clip + grad_quant) * step * np.log(2)
        # input gradient passes through for in-range elements only
        grad_inp = grad_inp_flq * quant_mask
        return grad_inp, grad_s
def test_TQT():
    """Compare MegEngine's TQT_Function with the NumPy reference,
    both forward output and backward gradients."""
    mge_op = TQT_Function(-127, 127)
    ref_op = numpy_TQT_Function(-127, 127)
    def verify(inp, scale, grad, inp_np, scale_np, grad_np):
        # Forward outputs must agree within float32 tolerance.
        np.testing.assert_allclose(
            mge_op.forward(inp, scale).numpy(),
            ref_op.forward(inp_np, scale_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # Backward: gradient w.r.t. input and w.r.t. scale.
        g_inp, g_scale = mge_op.backward(grad)
        g_inp_np, g_scale_np = ref_op.backward(grad_np)
        np.testing.assert_allclose(g_inp.numpy(), g_inp_np.astype("float32"), rtol=1e-6)
        np.testing.assert_allclose(g_scale.numpy(), g_scale_np.astype("float32"), rtol=5e-5)
    inp_np = np.random.random((4, 3)).astype("float32")
    scale_np = np.random.random((1)).astype("float32")
    inp = tensor(inp_np)
    scale = tensor(scale_np)
    verify(inp, scale, scale, inp_np, scale_np, scale_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding as an autograd ``Function``.

    Forward rounds element-wise via ``F.round``; backward is a
    straight-through estimator that passes the incoming gradient
    through unchanged (round has zero gradient almost everywhere).
    """
    def forward(self, x):
        return F.round(x)
    def backward(self, output_grads):
        # Straight-through: treat round() as identity for gradients.
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization used as a reference in tests.

    Maps ``inp`` onto the integer grid ``[qmin, qmax]`` (offset by
    ``zero_point``), then de-quantizes back to float.
    """
    q = Round()(inp / scale) + zero_point
    q = F.maximum(q, qmin)
    q = F.minimum(q, qmax)
    return (q - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.function import Function
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.fake_quant import TQT_Function
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor
class numpy_TQT_Function:
    """Plain-NumPy reference of TQT fake quantization.

    The quantization step is learned in log2 domain: ``step = 2 ** scale``.
    ``forward`` fake-quantizes; ``backward`` returns
    ``(grad_input, grad_scale)``.
    """
    def __init__(self, lowerbound, upperbound):
        super().__init__()
        self.lowerbound = lowerbound
        self.upperbound = upperbound
    def forward(self, inp, scale):
        step = 2 ** scale
        scaled = inp / step
        clipped = np.maximum(np.minimum(scaled, self.upperbound), self.lowerbound)
        rounded = np.round(clipped)
        # Keep intermediates around for backward().
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step
    def backward(self, grad_inp_flq):
        scaled, rounded, step = self.saved_tensors
        # Elements clipped away: scaled values beyond bounds +/- 0.5.
        clip_mask = (scaled < -0.5 + self.lowerbound) + (scaled > self.upperbound + 0.5)
        # Complement: elements inside the quantization range.
        quant_mask = np.abs(clip_mask - 1)
        grad_quant = grad_inp_flq * quant_mask * (rounded - scaled)
        grad_clip = grad_inp_flq * clip_mask * rounded
        # dL/ds = dL/dt * t * ln(2), since t = 2 ** s.
        grad_scale = (grad_clip.sum() + grad_quant.sum()) * step * np.log(2)
        return grad_inp_flq * quant_mask, grad_scale
def test_TQT():
    """Compare MegEngine's TQT_Function with the NumPy reference,
    both forward output and backward gradients."""
    mge_op = TQT_Function(-127, 127)
    ref_op = numpy_TQT_Function(-127, 127)
    def verify(inp, scale, grad, inp_np, scale_np, grad_np):
        # Forward outputs must agree within float32 tolerance.
        np.testing.assert_allclose(
            mge_op.forward(inp, scale).numpy(),
            ref_op.forward(inp_np, scale_np).astype("float32"),
            rtol=1e-6,
            atol=1e-6,
        )
        # Backward: gradient w.r.t. input and w.r.t. scale.
        g_inp, g_scale = mge_op.backward(grad)
        g_inp_np, g_scale_np = ref_op.backward(grad_np)
        np.testing.assert_allclose(g_inp.numpy(), g_inp_np.astype("float32"), rtol=1e-6)
        np.testing.assert_allclose(g_scale.numpy(), g_scale_np.astype("float32"), rtol=5e-5)
    inp_np = np.random.random((4, 3)).astype("float32")
    scale_np = np.random.random((1)).astype("float32")
    inp = tensor(inp_np)
    scale = tensor(scale_np)
    verify(inp, scale, scale, inp_np, scale_np, scale_np)
def _save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding as an autograd ``Function``.

    Forward rounds element-wise via ``F.round``; backward is a
    straight-through estimator that passes the incoming gradient
    through unchanged (round has zero gradient almost everywhere).
    """
    def forward(self, x):
        return F.round(x)
    def backward(self, output_grads):
        # Straight-through: treat round() as identity for gradients.
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization used as a reference in tests.

    Maps ``inp`` onto the integer grid ``[qmin, qmax]`` (offset by
    ``zero_point``), then de-quantizes back to float.
    """
    q = Round()(inp / scale) + zero_point
    q = F.maximum(q, qmin)
    q = F.minimum(q, qmax)
    return (q - zero_point) * scale
def test_fakequant():
qmin = -126
qmax = 129
def run(zero_point, scale):
q_dict = {}
q_dict["mode"] = QuantMode.ASYMMERTIC
q_dict["scale"] = scale
q_dict["zero_point"] = zero_point
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
grad = Grad().wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qmin, qmax, q_dict)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from *cfg*: anchor generator,
        box coder, ResNet backbone, FPN neck (with extra P6/P7 levels)
        and the classification/regression head."""
        super().__init__()
        self.cfg = cfg
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classifier head is unused for detection.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of only the FPN levels consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = | F.concat(box_logits_list, axis=1) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from *cfg*: anchor generator,
        box coder, ResNet backbone, FPN neck (with extra P6/P7 levels)
        and the classification/regression head."""
        super().__init__()
        self.cfg = cfg
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classifier head is unused for detection.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of only the FPN levels consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = | F.concat(box_offsets_list, axis=1) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from *cfg*: anchor generator,
        box coder, ResNet backbone, FPN neck (with extra P6/P7 levels)
        and the classification/regression head."""
        super().__init__()
        self.cfg = cfg
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classifier head is unused for detection.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of only the FPN levels consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = F.concat(box_offsets_list, axis=1)
all_level_anchors = | F.concat(anchors_list, axis=0) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from *cfg*: anchor generator,
        box coder, ResNet backbone, FPN neck (with extra P6/P7 levels)
        and the classification/regression head."""
        super().__init__()
        self.cfg = cfg
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classifier head is unused for detection.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of only the FPN levels consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
    def forward(self, image, im_info, gt_boxes=None):
        """Run the detector.

        In training mode returns the loss dict from ``get_losses``
        (``gt_boxes`` must be provided); in inference mode returns
        ``(pred_score, clipped_boxes)`` for a single image.
        """
        image = self.preprocess_image(image)
        features = self.backbone(image)
        features = [features[f] for f in self.in_features]
        box_logits, box_offsets = self.head(features)
        # Flatten each per-level map to (N, -1, num_classes) in NHWC order.
        box_logits_list = [
            _.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
            for _ in box_logits
        ]
        # Same flattening for the regression offsets: (N, -1, 4).
        box_offsets_list = [
            _.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
        ]
        anchors_list = self.anchor_generator(features)
        # Concatenate predictions and anchors across all FPN levels.
        all_level_box_logits = F.concat(box_logits_list, axis=1)
        all_level_box_offsets = F.concat(box_offsets_list, axis=1)
        all_level_anchors = F.concat(anchors_list, axis=0)
        if self.training:
            loss_dict = self.get_losses(
                all_level_anchors, all_level_box_logits,
                all_level_box_offsets, gt_boxes, im_info
            )
            self.cfg.losses_keys = list(loss_dict.keys())
            return loss_dict
        else:
            # currently not support multi-batch testing
            assert image.shape[0] == 1
            pred_boxes = self.box_coder.decode(
                all_level_anchors, all_level_box_offsets[0]
            )
            pred_boxes = pred_boxes.reshape(-1, 4)
            # NOTE(review): im_info[:, :4] appears to hold
            # (orig_h, orig_w, resized_h, resized_w); these ratios map
            # boxes back to the original resolution -- confirm with the
            # data pipeline.
            scale_w = im_info[0, 1] / im_info[0, 3]
            scale_h = im_info[0, 0] / im_info[0, 2]
            pred_boxes = pred_boxes / F.concat(
                [scale_w, scale_h, scale_w, scale_h], axis=0
            )
            clipped_boxes = layers.get_clipped_boxes(
                pred_boxes, im_info[0, 2:4]
            ).reshape(-1, 4)
            pred_score = F.sigmoid(all_level_box_logits)[0]
            return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = | F.sigmoid(pred_logits) | megengine.functional.sigmoid |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from *cfg*: anchor generator,
        box coder, ResNet backbone, FPN neck (with extra P6/P7 levels)
        and the classification/regression head."""
        super().__init__()
        self.cfg = cfg
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classifier head is unused for detection.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of only the FPN levels consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
    def forward(self, image, im_info, gt_boxes=None):
        """Run the detector.

        In training mode returns the loss dict from ``get_losses``
        (``gt_boxes`` must be provided); in inference mode returns
        ``(pred_score, clipped_boxes)`` for a single image.
        """
        image = self.preprocess_image(image)
        features = self.backbone(image)
        features = [features[f] for f in self.in_features]
        box_logits, box_offsets = self.head(features)
        # Flatten each per-level map to (N, -1, num_classes) in NHWC order.
        box_logits_list = [
            _.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
            for _ in box_logits
        ]
        # Same flattening for the regression offsets: (N, -1, 4).
        box_offsets_list = [
            _.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
        ]
        anchors_list = self.anchor_generator(features)
        # Concatenate predictions and anchors across all FPN levels.
        all_level_box_logits = F.concat(box_logits_list, axis=1)
        all_level_box_offsets = F.concat(box_offsets_list, axis=1)
        all_level_anchors = F.concat(anchors_list, axis=0)
        if self.training:
            loss_dict = self.get_losses(
                all_level_anchors, all_level_box_logits,
                all_level_box_offsets, gt_boxes, im_info
            )
            self.cfg.losses_keys = list(loss_dict.keys())
            return loss_dict
        else:
            # currently not support multi-batch testing
            assert image.shape[0] == 1
            pred_boxes = self.box_coder.decode(
                all_level_anchors, all_level_box_offsets[0]
            )
            pred_boxes = pred_boxes.reshape(-1, 4)
            # NOTE(review): im_info[:, :4] appears to hold
            # (orig_h, orig_w, resized_h, resized_w); these ratios map
            # boxes back to the original resolution -- confirm with the
            # data pipeline.
            scale_w = im_info[0, 1] / im_info[0, 3]
            scale_h = im_info[0, 0] / im_info[0, 2]
            pred_boxes = pred_boxes / F.concat(
                [scale_w, scale_h, scale_w, scale_h], axis=0
            )
            clipped_boxes = layers.get_clipped_boxes(
                pred_boxes, im_info[0, 2:4]
            ).reshape(-1, 4)
            pred_score = F.sigmoid(all_level_box_logits)[0]
            return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
# guarantee that nonzero_idx is not empty
if gt_pred_prob.max() > clamp_eps:
_, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
# since nonzeros is only 1 dim, use num_anchor to get real indices
num_anchors = gt_pred_prob.shape[1]
anchors_idx = nonzero_idx % num_anchors
gt_idx = nonzero_idx // num_anchors
image_boxes_prob[anchors_idx, labels[gt_idx]] = gt_pred_prob[gt_idx, anchors_idx]
box_prob_list.append(image_boxes_prob)
# construct bags for objects
match_quality_matrix = layers.get_iou(boxes_info[:, :4], anchors).detach()
num_gt = match_quality_matrix.shape[0]
_, matched_idx = F.topk(
match_quality_matrix,
k=bucket_size,
descending=True,
no_sort=True,
)
matched_idx = matched_idx.detach()
matched_idx_flatten = matched_idx.reshape(-1)
gather_idx = labels.reshape(-1, 1)
gather_idx = F.broadcast_to(gather_idx, (num_gt, bucket_size))
gather_src = pred_scores[bid, matched_idx_flatten]
gather_src = gather_src.reshape(num_gt, bucket_size, -1)
matched_score = F.indexing_one_hot(gather_src, gather_idx, axis=2)
topk_anchors = anchors[matched_idx_flatten]
boxes_broad_cast = F.broadcast_to(
F.expand_dims(boxes_info[:, :4], axis=1), (num_gt, bucket_size, 4)
).reshape(-1, 4)
matched_offsets = self.box_coder.encode(topk_anchors, boxes_broad_cast)
reg_loss = layers.smooth_l1_loss(
pred_offsets[bid, matched_idx_flatten],
matched_offsets,
beta=self.cfg.smooth_l1_beta
).sum(axis=-1) * self.cfg.reg_loss_weight
matched_reg_scores = F.exp(-reg_loss)
positive_losses.append(
positive_bag_loss(
matched_score * matched_reg_scores.reshape(-1, bucket_size), axis=1
)
)
num_foreground = im_info[:, 4].sum()
pos_loss = F.concat(positive_losses).sum() / F.maximum(num_foreground, 1)
box_probs = | F.stack(box_prob_list, axis=0) | megengine.functional.stack |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from *cfg*: anchor generator,
        box coder, ResNet backbone, FPN neck (with extra P6/P7 levels)
        and the classification/regression head."""
        super().__init__()
        self.cfg = cfg
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classifier head is unused for detection.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of only the FPN levels consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
    def forward(self, image, im_info, gt_boxes=None):
        """Run the detector.

        In training mode returns the loss dict from ``get_losses``
        (``gt_boxes`` must be provided); in inference mode returns
        ``(pred_score, clipped_boxes)`` for a single image.
        """
        image = self.preprocess_image(image)
        features = self.backbone(image)
        features = [features[f] for f in self.in_features]
        box_logits, box_offsets = self.head(features)
        # Flatten each per-level map to (N, -1, num_classes) in NHWC order.
        box_logits_list = [
            _.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
            for _ in box_logits
        ]
        # Same flattening for the regression offsets: (N, -1, 4).
        box_offsets_list = [
            _.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
        ]
        anchors_list = self.anchor_generator(features)
        # Concatenate predictions and anchors across all FPN levels.
        all_level_box_logits = F.concat(box_logits_list, axis=1)
        all_level_box_offsets = F.concat(box_offsets_list, axis=1)
        all_level_anchors = F.concat(anchors_list, axis=0)
        if self.training:
            loss_dict = self.get_losses(
                all_level_anchors, all_level_box_logits,
                all_level_box_offsets, gt_boxes, im_info
            )
            self.cfg.losses_keys = list(loss_dict.keys())
            return loss_dict
        else:
            # currently not support multi-batch testing
            assert image.shape[0] == 1
            pred_boxes = self.box_coder.decode(
                all_level_anchors, all_level_box_offsets[0]
            )
            pred_boxes = pred_boxes.reshape(-1, 4)
            # NOTE(review): im_info[:, :4] appears to hold
            # (orig_h, orig_w, resized_h, resized_w); these ratios map
            # boxes back to the original resolution -- confirm with the
            # data pipeline.
            scale_w = im_info[0, 1] / im_info[0, 3]
            scale_h = im_info[0, 0] / im_info[0, 2]
            pred_boxes = pred_boxes / F.concat(
                [scale_w, scale_h, scale_w, scale_h], axis=0
            )
            clipped_boxes = layers.get_clipped_boxes(
                pred_boxes, im_info[0, 2:4]
            ).reshape(-1, 4)
            pred_score = F.sigmoid(all_level_box_logits)[0]
            return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
# guarantee that nonzero_idx is not empty
if gt_pred_prob.max() > clamp_eps:
_, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
# since nonzeros is only 1 dim, use num_anchor to get real indices
num_anchors = gt_pred_prob.shape[1]
anchors_idx = nonzero_idx % num_anchors
gt_idx = nonzero_idx // num_anchors
image_boxes_prob[anchors_idx, labels[gt_idx]] = gt_pred_prob[gt_idx, anchors_idx]
box_prob_list.append(image_boxes_prob)
# construct bags for objects
match_quality_matrix = layers.get_iou(boxes_info[:, :4], anchors).detach()
num_gt = match_quality_matrix.shape[0]
_, matched_idx = F.topk(
match_quality_matrix,
k=bucket_size,
descending=True,
no_sort=True,
)
matched_idx = matched_idx.detach()
matched_idx_flatten = matched_idx.reshape(-1)
gather_idx = labels.reshape(-1, 1)
gather_idx = | F.broadcast_to(gather_idx, (num_gt, bucket_size)) | megengine.functional.broadcast_to |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from *cfg*: anchor generator,
        box coder, ResNet backbone, FPN neck (with extra P6/P7 levels)
        and the classification/regression head."""
        super().__init__()
        self.cfg = cfg
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classifier head is unused for detection.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of only the FPN levels consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = F.concat(box_offsets_list, axis=1)
all_level_anchors = F.concat(anchors_list, axis=0)
if self.training:
loss_dict = self.get_losses(
all_level_anchors, all_level_box_logits,
all_level_box_offsets, gt_boxes, im_info
)
self.cfg.losses_keys = list(loss_dict.keys())
return loss_dict
else:
# currently not support multi-batch testing
assert image.shape[0] == 1
pred_boxes = self.box_coder.decode(
all_level_anchors, all_level_box_offsets[0]
)
pred_boxes = pred_boxes.reshape(-1, 4)
scale_w = im_info[0, 1] / im_info[0, 3]
scale_h = im_info[0, 0] / im_info[0, 2]
pred_boxes = pred_boxes / F.concat(
[scale_w, scale_h, scale_w, scale_h], axis=0
)
clipped_boxes = layers.get_clipped_boxes(
pred_boxes, im_info[0, 2:4]
).reshape(-1, 4)
pred_score = F.sigmoid(all_level_box_logits)[0]
return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
# guarantee that nonzero_idx is not empty
if gt_pred_prob.max() > clamp_eps:
_, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
# since nonzeros is only 1 dim, use num_anchor to get real indices
num_anchors = gt_pred_prob.shape[1]
anchors_idx = nonzero_idx % num_anchors
gt_idx = nonzero_idx // num_anchors
image_boxes_prob[anchors_idx, labels[gt_idx]] = gt_pred_prob[gt_idx, anchors_idx]
box_prob_list.append(image_boxes_prob)
# construct bags for objects
match_quality_matrix = layers.get_iou(boxes_info[:, :4], anchors).detach()
num_gt = match_quality_matrix.shape[0]
_, matched_idx = F.topk(
match_quality_matrix,
k=bucket_size,
descending=True,
no_sort=True,
)
matched_idx = matched_idx.detach()
matched_idx_flatten = matched_idx.reshape(-1)
gather_idx = labels.reshape(-1, 1)
gather_idx = F.broadcast_to(gather_idx, (num_gt, bucket_size))
gather_src = pred_scores[bid, matched_idx_flatten]
gather_src = gather_src.reshape(num_gt, bucket_size, -1)
matched_score = | F.indexing_one_hot(gather_src, gather_idx, axis=2) | megengine.functional.indexing_one_hot |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from a config object.

        :param cfg: config namespace providing anchor, backbone, FPN and
            head hyper-parameters (see the attribute reads below).
        """
        super().__init__()
        self.cfg = cfg
        # Anchor generator produces per-level anchor boxes from the
        # configured scales / ratios / strides.
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        # Encodes / decodes box regression targets with configured mean/std.
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        # Look up the ResNet constructor by name (e.g. cfg.backbone == "resnet50").
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classification head is unused for detection; drop it
        # before wrapping the backbone in the FPN.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of the FPN levels actually consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = F.concat(box_offsets_list, axis=1)
all_level_anchors = F.concat(anchors_list, axis=0)
if self.training:
loss_dict = self.get_losses(
all_level_anchors, all_level_box_logits,
all_level_box_offsets, gt_boxes, im_info
)
self.cfg.losses_keys = list(loss_dict.keys())
return loss_dict
else:
# currently not support multi-batch testing
assert image.shape[0] == 1
pred_boxes = self.box_coder.decode(
all_level_anchors, all_level_box_offsets[0]
)
pred_boxes = pred_boxes.reshape(-1, 4)
scale_w = im_info[0, 1] / im_info[0, 3]
scale_h = im_info[0, 0] / im_info[0, 2]
pred_boxes = pred_boxes / F.concat(
[scale_w, scale_h, scale_w, scale_h], axis=0
)
clipped_boxes = layers.get_clipped_boxes(
pred_boxes, im_info[0, 2:4]
).reshape(-1, 4)
pred_score = F.sigmoid(all_level_box_logits)[0]
return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
# guarantee that nonzero_idx is not empty
if gt_pred_prob.max() > clamp_eps:
_, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
# since nonzeros is only 1 dim, use num_anchor to get real indices
num_anchors = gt_pred_prob.shape[1]
anchors_idx = nonzero_idx % num_anchors
gt_idx = nonzero_idx // num_anchors
image_boxes_prob[anchors_idx, labels[gt_idx]] = gt_pred_prob[gt_idx, anchors_idx]
box_prob_list.append(image_boxes_prob)
# construct bags for objects
match_quality_matrix = layers.get_iou(boxes_info[:, :4], anchors).detach()
num_gt = match_quality_matrix.shape[0]
_, matched_idx = F.topk(
match_quality_matrix,
k=bucket_size,
descending=True,
no_sort=True,
)
matched_idx = matched_idx.detach()
matched_idx_flatten = matched_idx.reshape(-1)
gather_idx = labels.reshape(-1, 1)
gather_idx = F.broadcast_to(gather_idx, (num_gt, bucket_size))
gather_src = pred_scores[bid, matched_idx_flatten]
gather_src = gather_src.reshape(num_gt, bucket_size, -1)
matched_score = F.indexing_one_hot(gather_src, gather_idx, axis=2)
topk_anchors = anchors[matched_idx_flatten]
boxes_broad_cast = F.broadcast_to(
F.expand_dims(boxes_info[:, :4], axis=1), (num_gt, bucket_size, 4)
).reshape(-1, 4)
matched_offsets = self.box_coder.encode(topk_anchors, boxes_broad_cast)
reg_loss = layers.smooth_l1_loss(
pred_offsets[bid, matched_idx_flatten],
matched_offsets,
beta=self.cfg.smooth_l1_beta
).sum(axis=-1) * self.cfg.reg_loss_weight
matched_reg_scores = | F.exp(-reg_loss) | megengine.functional.exp |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from a config object.

        :param cfg: config namespace providing anchor, backbone, FPN and
            head hyper-parameters (see the attribute reads below).
        """
        super().__init__()
        self.cfg = cfg
        # Anchor generator produces per-level anchor boxes from the
        # configured scales / ratios / strides.
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        # Encodes / decodes box regression targets with configured mean/std.
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        # Look up the ResNet constructor by name (e.g. cfg.backbone == "resnet50").
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classification head is unused for detection; drop it
        # before wrapping the backbone in the FPN.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of the FPN levels actually consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = F.concat(box_offsets_list, axis=1)
all_level_anchors = F.concat(anchors_list, axis=0)
if self.training:
loss_dict = self.get_losses(
all_level_anchors, all_level_box_logits,
all_level_box_offsets, gt_boxes, im_info
)
self.cfg.losses_keys = list(loss_dict.keys())
return loss_dict
else:
# currently not support multi-batch testing
assert image.shape[0] == 1
pred_boxes = self.box_coder.decode(
all_level_anchors, all_level_box_offsets[0]
)
pred_boxes = pred_boxes.reshape(-1, 4)
scale_w = im_info[0, 1] / im_info[0, 3]
scale_h = im_info[0, 0] / im_info[0, 2]
pred_boxes = pred_boxes / F.concat(
[scale_w, scale_h, scale_w, scale_h], axis=0
)
clipped_boxes = layers.get_clipped_boxes(
pred_boxes, im_info[0, 2:4]
).reshape(-1, 4)
pred_score = F.sigmoid(all_level_box_logits)[0]
return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
# guarantee that nonzero_idx is not empty
if gt_pred_prob.max() > clamp_eps:
_, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
# since nonzeros is only 1 dim, use num_anchor to get real indices
num_anchors = gt_pred_prob.shape[1]
anchors_idx = nonzero_idx % num_anchors
gt_idx = nonzero_idx // num_anchors
image_boxes_prob[anchors_idx, labels[gt_idx]] = gt_pred_prob[gt_idx, anchors_idx]
box_prob_list.append(image_boxes_prob)
# construct bags for objects
match_quality_matrix = layers.get_iou(boxes_info[:, :4], anchors).detach()
num_gt = match_quality_matrix.shape[0]
_, matched_idx = F.topk(
match_quality_matrix,
k=bucket_size,
descending=True,
no_sort=True,
)
matched_idx = matched_idx.detach()
matched_idx_flatten = matched_idx.reshape(-1)
gather_idx = labels.reshape(-1, 1)
gather_idx = F.broadcast_to(gather_idx, (num_gt, bucket_size))
gather_src = pred_scores[bid, matched_idx_flatten]
gather_src = gather_src.reshape(num_gt, bucket_size, -1)
matched_score = F.indexing_one_hot(gather_src, gather_idx, axis=2)
topk_anchors = anchors[matched_idx_flatten]
boxes_broad_cast = F.broadcast_to(
F.expand_dims(boxes_info[:, :4], axis=1), (num_gt, bucket_size, 4)
).reshape(-1, 4)
matched_offsets = self.box_coder.encode(topk_anchors, boxes_broad_cast)
reg_loss = layers.smooth_l1_loss(
pred_offsets[bid, matched_idx_flatten],
matched_offsets,
beta=self.cfg.smooth_l1_beta
).sum(axis=-1) * self.cfg.reg_loss_weight
matched_reg_scores = F.exp(-reg_loss)
positive_losses.append(
positive_bag_loss(
matched_score * matched_reg_scores.reshape(-1, bucket_size), axis=1
)
)
num_foreground = im_info[:, 4].sum()
pos_loss = F.concat(positive_losses).sum() / | F.maximum(num_foreground, 1) | megengine.functional.maximum |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from a config object.

        :param cfg: config namespace providing anchor, backbone, FPN and
            head hyper-parameters (see the attribute reads below).
        """
        super().__init__()
        self.cfg = cfg
        # Anchor generator produces per-level anchor boxes from the
        # configured scales / ratios / strides.
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        # Encodes / decodes box regression targets with configured mean/std.
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        # Look up the ResNet constructor by name (e.g. cfg.backbone == "resnet50").
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classification head is unused for detection; drop it
        # before wrapping the backbone in the FPN.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of the FPN levels actually consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = F.concat(box_offsets_list, axis=1)
all_level_anchors = F.concat(anchors_list, axis=0)
if self.training:
loss_dict = self.get_losses(
all_level_anchors, all_level_box_logits,
all_level_box_offsets, gt_boxes, im_info
)
self.cfg.losses_keys = list(loss_dict.keys())
return loss_dict
else:
# currently not support multi-batch testing
assert image.shape[0] == 1
pred_boxes = self.box_coder.decode(
all_level_anchors, all_level_box_offsets[0]
)
pred_boxes = pred_boxes.reshape(-1, 4)
scale_w = im_info[0, 1] / im_info[0, 3]
scale_h = im_info[0, 0] / im_info[0, 2]
pred_boxes = pred_boxes / F.concat(
[scale_w, scale_h, scale_w, scale_h], axis=0
)
clipped_boxes = layers.get_clipped_boxes(
pred_boxes, im_info[0, 2:4]
).reshape(-1, 4)
pred_score = F.sigmoid(all_level_box_logits)[0]
return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
# guarantee that nonzero_idx is not empty
if gt_pred_prob.max() > clamp_eps:
_, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
# since nonzeros is only 1 dim, use num_anchor to get real indices
num_anchors = gt_pred_prob.shape[1]
anchors_idx = nonzero_idx % num_anchors
gt_idx = nonzero_idx // num_anchors
image_boxes_prob[anchors_idx, labels[gt_idx]] = gt_pred_prob[gt_idx, anchors_idx]
box_prob_list.append(image_boxes_prob)
# construct bags for objects
match_quality_matrix = layers.get_iou(boxes_info[:, :4], anchors).detach()
num_gt = match_quality_matrix.shape[0]
_, matched_idx = F.topk(
match_quality_matrix,
k=bucket_size,
descending=True,
no_sort=True,
)
matched_idx = matched_idx.detach()
matched_idx_flatten = matched_idx.reshape(-1)
gather_idx = labels.reshape(-1, 1)
gather_idx = F.broadcast_to(gather_idx, (num_gt, bucket_size))
gather_src = pred_scores[bid, matched_idx_flatten]
gather_src = gather_src.reshape(num_gt, bucket_size, -1)
matched_score = F.indexing_one_hot(gather_src, gather_idx, axis=2)
topk_anchors = anchors[matched_idx_flatten]
boxes_broad_cast = F.broadcast_to(
F.expand_dims(boxes_info[:, :4], axis=1), (num_gt, bucket_size, 4)
).reshape(-1, 4)
matched_offsets = self.box_coder.encode(topk_anchors, boxes_broad_cast)
reg_loss = layers.smooth_l1_loss(
pred_offsets[bid, matched_idx_flatten],
matched_offsets,
beta=self.cfg.smooth_l1_beta
).sum(axis=-1) * self.cfg.reg_loss_weight
matched_reg_scores = F.exp(-reg_loss)
positive_losses.append(
positive_bag_loss(
matched_score * matched_reg_scores.reshape(-1, bucket_size), axis=1
)
)
num_foreground = im_info[:, 4].sum()
pos_loss = F.concat(positive_losses).sum() / F.maximum(num_foreground, 1)
box_probs = F.stack(box_prob_list, axis=0)
neg_loss = negative_bag_loss(
pred_scores * (1 - box_probs), self.cfg.focal_loss_gamma
).sum() / | F.maximum(num_foreground * bucket_size, 1) | megengine.functional.maximum |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from a config object.

        :param cfg: config namespace providing anchor, backbone, FPN and
            head hyper-parameters (see the attribute reads below).
        """
        super().__init__()
        self.cfg = cfg
        # Anchor generator produces per-level anchor boxes from the
        # configured scales / ratios / strides.
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        # Encodes / decodes box regression targets with configured mean/std.
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        # Look up the ResNet constructor by name (e.g. cfg.backbone == "resnet50").
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classification head is unused for detection; drop it
        # before wrapping the backbone in the FPN.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of the FPN levels actually consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = F.concat(box_offsets_list, axis=1)
all_level_anchors = F.concat(anchors_list, axis=0)
if self.training:
loss_dict = self.get_losses(
all_level_anchors, all_level_box_logits,
all_level_box_offsets, gt_boxes, im_info
)
self.cfg.losses_keys = list(loss_dict.keys())
return loss_dict
else:
# currently not support multi-batch testing
assert image.shape[0] == 1
pred_boxes = self.box_coder.decode(
all_level_anchors, all_level_box_offsets[0]
)
pred_boxes = pred_boxes.reshape(-1, 4)
scale_w = im_info[0, 1] / im_info[0, 3]
scale_h = im_info[0, 0] / im_info[0, 2]
pred_boxes = pred_boxes / F.concat(
[scale_w, scale_h, scale_w, scale_h], axis=0
)
clipped_boxes = layers.get_clipped_boxes(
pred_boxes, im_info[0, 2:4]
).reshape(-1, 4)
pred_score = | F.sigmoid(all_level_box_logits) | megengine.functional.sigmoid |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from a config object.

        :param cfg: config namespace providing anchor, backbone, FPN and
            head hyper-parameters (see the attribute reads below).
        """
        super().__init__()
        self.cfg = cfg
        # Anchor generator produces per-level anchor boxes from the
        # configured scales / ratios / strides.
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        # Encodes / decodes box regression targets with configured mean/std.
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        # Look up the ResNet constructor by name (e.g. cfg.backbone == "resnet50").
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classification head is unused for detection; drop it
        # before wrapping the backbone in the FPN.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of the FPN levels actually consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = F.concat(box_offsets_list, axis=1)
all_level_anchors = F.concat(anchors_list, axis=0)
if self.training:
loss_dict = self.get_losses(
all_level_anchors, all_level_box_logits,
all_level_box_offsets, gt_boxes, im_info
)
self.cfg.losses_keys = list(loss_dict.keys())
return loss_dict
else:
# currently not support multi-batch testing
assert image.shape[0] == 1
pred_boxes = self.box_coder.decode(
all_level_anchors, all_level_box_offsets[0]
)
pred_boxes = pred_boxes.reshape(-1, 4)
scale_w = im_info[0, 1] / im_info[0, 3]
scale_h = im_info[0, 0] / im_info[0, 2]
pred_boxes = pred_boxes / F.concat(
[scale_w, scale_h, scale_w, scale_h], axis=0
)
clipped_boxes = layers.get_clipped_boxes(
pred_boxes, im_info[0, 2:4]
).reshape(-1, 4)
pred_score = F.sigmoid(all_level_box_logits)[0]
return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
# guarantee that nonzero_idx is not empty
if gt_pred_prob.max() > clamp_eps:
_, nonzero_idx = | F.cond_take(gt_pred_prob != 0, gt_pred_prob) | megengine.functional.cond_take |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
    def __init__(self, cfg):
        """Build the FreeAnchor detector from a config object.

        :param cfg: config namespace providing anchor, backbone, FPN and
            head hyper-parameters (see the attribute reads below).
        """
        super().__init__()
        self.cfg = cfg
        # Anchor generator produces per-level anchor boxes from the
        # configured scales / ratios / strides.
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=self.cfg.anchor_scales,
            anchor_ratios=self.cfg.anchor_ratios,
            strides=self.cfg.stride,
            offset=self.cfg.anchor_offset,
        )
        # Encodes / decodes box regression targets with configured mean/std.
        self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
        self.in_features = cfg.in_features
        # ----------------------- build backbone ------------------------ #
        # Look up the ResNet constructor by name (e.g. cfg.backbone == "resnet50").
        bottom_up = getattr(resnet, cfg.backbone)(
            norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
        )
        # The ResNet classification head is unused for detection; drop it
        # before wrapping the backbone in the FPN.
        del bottom_up.fc
        # ----------------------- build FPN ----------------------------- #
        self.backbone = layers.FPN(
            bottom_up=bottom_up,
            in_features=cfg.fpn_in_features,
            out_channels=cfg.fpn_out_channels,
            norm=cfg.fpn_norm,
            top_block=layers.LastLevelP6P7(
                cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
            ),
            strides=cfg.fpn_in_strides,
            channels=cfg.fpn_in_channels,
        )
        backbone_shape = self.backbone.output_shape()
        # Shapes of the FPN levels actually consumed by the head.
        feature_shapes = [backbone_shape[f] for f in self.in_features]
        # ----------------------- build FreeAnchor Head ----------------- #
        self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
    def forward(self, image, im_info, gt_boxes=None):
        """Run the detector.

        Args:
            image: input image batch (NCHW).
            im_info: per-image meta tensor; columns 0-3 are used here as
                resized/original height-width pairs for rescaling boxes.
            gt_boxes: ground-truth boxes, required only in training.

        Returns:
            Training: a dict of losses. Inference: (pred_score, clipped_boxes)
            for a single image (multi-batch inference unsupported).
        """
        image = self.preprocess_image(image)
        features = self.backbone(image)
        features = [features[f] for f in self.in_features]
        box_logits, box_offsets = self.head(features)
        # Flatten each level from NCHW to (N, num_anchors_on_level, C).
        box_logits_list = [
            _.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
            for _ in box_logits
        ]
        box_offsets_list = [
            _.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
        ]
        anchors_list = self.anchor_generator(features)
        # Concatenate across FPN levels into one flat anchor dimension.
        all_level_box_logits = F.concat(box_logits_list, axis=1)
        all_level_box_offsets = F.concat(box_offsets_list, axis=1)
        all_level_anchors = F.concat(anchors_list, axis=0)
        if self.training:
            loss_dict = self.get_losses(
                all_level_anchors, all_level_box_logits,
                all_level_box_offsets, gt_boxes, im_info
            )
            # Record loss names so the training loop can unpack them.
            self.cfg.losses_keys = list(loss_dict.keys())
            return loss_dict
        else:
            # currently not support multi-batch testing
            assert image.shape[0] == 1
            pred_boxes = self.box_coder.decode(
                all_level_anchors, all_level_box_offsets[0]
            )
            pred_boxes = pred_boxes.reshape(-1, 4)
            # Undo the resize applied at preprocessing time
            # (im_info columns presumably hold resized vs. original H/W — TODO confirm).
            scale_w = im_info[0, 1] / im_info[0, 3]
            scale_h = im_info[0, 0] / im_info[0, 2]
            pred_boxes = pred_boxes / F.concat(
                [scale_w, scale_h, scale_w, scale_h], axis=0
            )
            clipped_boxes = layers.get_clipped_boxes(
                pred_boxes, im_info[0, 2:4]
            ).reshape(-1, 4)
            pred_score = F.sigmoid(all_level_box_logits)[0]
            return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = | F.zeros(pred_logits.shape[1:]) | megengine.functional.zeros |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=self.cfg.anchor_scales,
anchor_ratios=self.cfg.anchor_ratios,
strides=self.cfg.stride,
offset=self.cfg.anchor_offset,
)
self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
self.in_features = cfg.in_features
# ----------------------- build backbone ------------------------ #
bottom_up = getattr(resnet, cfg.backbone)(
norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
)
del bottom_up.fc
# ----------------------- build FPN ----------------------------- #
self.backbone = layers.FPN(
bottom_up=bottom_up,
in_features=cfg.fpn_in_features,
out_channels=cfg.fpn_out_channels,
norm=cfg.fpn_norm,
top_block=layers.LastLevelP6P7(
cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
),
strides=cfg.fpn_in_strides,
channels=cfg.fpn_in_channels,
)
backbone_shape = self.backbone.output_shape()
feature_shapes = [backbone_shape[f] for f in self.in_features]
# ----------------------- build FreeAnchor Head ----------------- #
self.head = layers.BoxHead(cfg, feature_shapes)
    def preprocess_image(self, image):
        """Pad the batch to a multiple of 32 and normalize by cfg mean/std."""
        padded_image = layers.get_padded_tensor(image, 32, 0.0)
        # img_mean/img_std are (C,) lists broadcast to NCHW.
        normed_image = (
            padded_image
            - np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
        ) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
        return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = F.concat(box_offsets_list, axis=1)
all_level_anchors = F.concat(anchors_list, axis=0)
if self.training:
loss_dict = self.get_losses(
all_level_anchors, all_level_box_logits,
all_level_box_offsets, gt_boxes, im_info
)
self.cfg.losses_keys = list(loss_dict.keys())
return loss_dict
else:
# currently not support multi-batch testing
assert image.shape[0] == 1
pred_boxes = self.box_coder.decode(
all_level_anchors, all_level_box_offsets[0]
)
pred_boxes = pred_boxes.reshape(-1, 4)
scale_w = im_info[0, 1] / im_info[0, 3]
scale_h = im_info[0, 0] / im_info[0, 2]
pred_boxes = pred_boxes / F.concat(
[scale_w, scale_h, scale_w, scale_h], axis=0
)
clipped_boxes = layers.get_clipped_boxes(
pred_boxes, im_info[0, 2:4]
).reshape(-1, 4)
pred_score = F.sigmoid(all_level_box_logits)[0]
return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
# guarantee that nonzero_idx is not empty
if gt_pred_prob.max() > clamp_eps:
_, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
# since nonzeros is only 1 dim, use num_anchor to get real indices
num_anchors = gt_pred_prob.shape[1]
anchors_idx = nonzero_idx % num_anchors
gt_idx = nonzero_idx // num_anchors
image_boxes_prob[anchors_idx, labels[gt_idx]] = gt_pred_prob[gt_idx, anchors_idx]
box_prob_list.append(image_boxes_prob)
# construct bags for objects
match_quality_matrix = layers.get_iou(boxes_info[:, :4], anchors).detach()
num_gt = match_quality_matrix.shape[0]
_, matched_idx = F.topk(
match_quality_matrix,
k=bucket_size,
descending=True,
no_sort=True,
)
matched_idx = matched_idx.detach()
matched_idx_flatten = matched_idx.reshape(-1)
gather_idx = labels.reshape(-1, 1)
gather_idx = F.broadcast_to(gather_idx, (num_gt, bucket_size))
gather_src = pred_scores[bid, matched_idx_flatten]
gather_src = gather_src.reshape(num_gt, bucket_size, -1)
matched_score = F.indexing_one_hot(gather_src, gather_idx, axis=2)
topk_anchors = anchors[matched_idx_flatten]
boxes_broad_cast = F.broadcast_to(
F.expand_dims(boxes_info[:, :4], axis=1), (num_gt, bucket_size, 4)
).reshape(-1, 4)
matched_offsets = self.box_coder.encode(topk_anchors, boxes_broad_cast)
reg_loss = layers.smooth_l1_loss(
pred_offsets[bid, matched_idx_flatten],
matched_offsets,
beta=self.cfg.smooth_l1_beta
).sum(axis=-1) * self.cfg.reg_loss_weight
matched_reg_scores = F.exp(-reg_loss)
positive_losses.append(
positive_bag_loss(
matched_score * matched_reg_scores.reshape(-1, bucket_size), axis=1
)
)
num_foreground = im_info[:, 4].sum()
pos_loss = | F.concat(positive_losses) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
import megengine.module as M
import models.backbones.resnet.model as resnet
import layers
class FreeAnchor(M.Module):
"""
Implement RetinaNet (https://arxiv.org/abs/1708.02002).
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.anchor_generator = layers.AnchorBoxGenerator(
anchor_scales=self.cfg.anchor_scales,
anchor_ratios=self.cfg.anchor_ratios,
strides=self.cfg.stride,
offset=self.cfg.anchor_offset,
)
self.box_coder = layers.BoxCoder(cfg.reg_mean, cfg.reg_std)
self.in_features = cfg.in_features
# ----------------------- build backbone ------------------------ #
bottom_up = getattr(resnet, cfg.backbone)(
norm=layers.get_norm(cfg.backbone_norm), pretrained=cfg.backbone_pretrained
)
del bottom_up.fc
# ----------------------- build FPN ----------------------------- #
self.backbone = layers.FPN(
bottom_up=bottom_up,
in_features=cfg.fpn_in_features,
out_channels=cfg.fpn_out_channels,
norm=cfg.fpn_norm,
top_block=layers.LastLevelP6P7(
cfg.fpn_top_in_channel, cfg.fpn_out_channels, cfg.fpn_top_in_feature
),
strides=cfg.fpn_in_strides,
channels=cfg.fpn_in_channels,
)
backbone_shape = self.backbone.output_shape()
feature_shapes = [backbone_shape[f] for f in self.in_features]
# ----------------------- build FreeAnchor Head ----------------- #
self.head = layers.BoxHead(cfg, feature_shapes)
def preprocess_image(self, image):
padded_image = layers.get_padded_tensor(image, 32, 0.0)
normed_image = (
padded_image
- np.array(self.cfg.img_mean, dtype="float32")[None, :, None, None]
) / np.array(self.cfg.img_std, dtype="float32")[None, :, None, None]
return normed_image
def forward(self, image, im_info, gt_boxes=None):
image = self.preprocess_image(image)
features = self.backbone(image)
features = [features[f] for f in self.in_features]
box_logits, box_offsets = self.head(features)
box_logits_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, self.cfg.num_classes)
for _ in box_logits
]
box_offsets_list = [
_.transpose(0, 2, 3, 1).reshape(image.shape[0], -1, 4) for _ in box_offsets
]
anchors_list = self.anchor_generator(features)
all_level_box_logits = F.concat(box_logits_list, axis=1)
all_level_box_offsets = F.concat(box_offsets_list, axis=1)
all_level_anchors = F.concat(anchors_list, axis=0)
if self.training:
loss_dict = self.get_losses(
all_level_anchors, all_level_box_logits,
all_level_box_offsets, gt_boxes, im_info
)
self.cfg.losses_keys = list(loss_dict.keys())
return loss_dict
else:
# currently not support multi-batch testing
assert image.shape[0] == 1
pred_boxes = self.box_coder.decode(
all_level_anchors, all_level_box_offsets[0]
)
pred_boxes = pred_boxes.reshape(-1, 4)
scale_w = im_info[0, 1] / im_info[0, 3]
scale_h = im_info[0, 0] / im_info[0, 2]
pred_boxes = pred_boxes / F.concat(
[scale_w, scale_h, scale_w, scale_h], axis=0
)
clipped_boxes = layers.get_clipped_boxes(
pred_boxes, im_info[0, 2:4]
).reshape(-1, 4)
pred_score = F.sigmoid(all_level_box_logits)[0]
return pred_score, clipped_boxes
def get_losses(self, anchors, pred_logits, pred_offsets, gt_boxes, im_info):
# pylint: disable=too-many-statements
def positive_bag_loss(logits, axis=1):
weight = 1.0 / (1.0 - logits)
weight /= weight.sum(axis=axis, keepdims=True)
bag_prob = (weight * logits).sum(axis=1)
return -layers.safelog(bag_prob)
def negative_bag_loss(logits, gamma):
return (logits ** gamma) * (-layers.safelog(1.0 - logits))
pred_scores = F.sigmoid(pred_logits)
box_prob_list = []
positive_losses = []
clamp_eps = 1e-7
bucket_size = self.cfg.bucket_size
for bid in range(im_info.shape[0]):
boxes_info = gt_boxes[bid, : im_info[bid, 4].astype("int32")]
# id 0 is used for background classes, so -1 first
labels = boxes_info[:, 4].astype("int32") - 1
pred_box = self.box_coder.decode(anchors, pred_offsets[bid]).detach()
overlaps = layers.get_iou(boxes_info[:, :4], pred_box).detach()
thresh1 = self.cfg.box_iou_threshold
thresh2 = F.clip(
overlaps.max(axis=1, keepdims=True),
lower=thresh1 + clamp_eps, upper=1.0
)
gt_pred_prob = F.clip(
(overlaps - thresh1) / (thresh2 - thresh1), lower=0, upper=1.0)
image_boxes_prob = F.zeros(pred_logits.shape[1:]).detach()
# guarantee that nonzero_idx is not empty
if gt_pred_prob.max() > clamp_eps:
_, nonzero_idx = F.cond_take(gt_pred_prob != 0, gt_pred_prob)
# since nonzeros is only 1 dim, use num_anchor to get real indices
num_anchors = gt_pred_prob.shape[1]
anchors_idx = nonzero_idx % num_anchors
gt_idx = nonzero_idx // num_anchors
image_boxes_prob[anchors_idx, labels[gt_idx]] = gt_pred_prob[gt_idx, anchors_idx]
box_prob_list.append(image_boxes_prob)
# construct bags for objects
match_quality_matrix = layers.get_iou(boxes_info[:, :4], anchors).detach()
num_gt = match_quality_matrix.shape[0]
_, matched_idx = F.topk(
match_quality_matrix,
k=bucket_size,
descending=True,
no_sort=True,
)
matched_idx = matched_idx.detach()
matched_idx_flatten = matched_idx.reshape(-1)
gather_idx = labels.reshape(-1, 1)
gather_idx = F.broadcast_to(gather_idx, (num_gt, bucket_size))
gather_src = pred_scores[bid, matched_idx_flatten]
gather_src = gather_src.reshape(num_gt, bucket_size, -1)
matched_score = F.indexing_one_hot(gather_src, gather_idx, axis=2)
topk_anchors = anchors[matched_idx_flatten]
boxes_broad_cast = F.broadcast_to(
| F.expand_dims(boxes_info[:, :4], axis=1) | megengine.functional.expand_dims |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
    """A stack of `num_layers` (ConvTranspose2d -> norm -> ReLU) upsampling stages."""

    def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
        super(DeconvLayers, self).__init__()
        stages = []
        in_ch = nf1
        for i in range(num_layers):
            out_ch = nf2s[i]
            kernel = kernels[i]
            # kernel // 3 yields padding 0 for kernel=2 and 1 for kernel=3 or 4
            stages.append(M.ConvTranspose2d(in_ch, out_ch, kernel, 2, kernel // 3, bias=bias))
            stages.append(norm(out_ch))
            stages.append(M.ReLU())
            in_ch = out_ch
        self.body = M.Sequential(*stages)

    def forward(self, x):
        return self.body(x)
class SimpleBaseline(M.Module):
    """SimpleBaseline keypoint model: ResNet features -> deconv stack -> heatmaps."""

    def __init__(self, backbone, cfg):
        """Build the model.

        Args:
            backbone: name of a constructor in the resnet module, e.g. "resnet50".
            cfg: config object providing deconv/keypoint hyper-parameters.
        """
        super(SimpleBaseline, self).__init__()
        norm = M.BatchNorm2d
        self.backbone = getattr(resnet, backbone)(
            norm=norm, pretrained=cfg.backbone_pretrained
        )
        # The classification head is unused for keypoint estimation.
        del self.backbone.fc
        self.cfg = cfg
        self.deconv_layers = DeconvLayers(
            cfg.initial_deconv_channels,
            cfg.deconv_channels,
            cfg.deconv_kernel_sizes,
            cfg.num_deconv_layers,
            cfg.deconv_with_bias,
            norm,
        )
        # 3x3 conv producing one heatmap channel per keypoint.
        self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
        self._initialize_weights()
        # Placeholder tensors filled in by the training/inference driver.
        self.inputs = {
            "image": mge.tensor(dtype="float32"),
            "heatmap": mge.tensor(dtype="float32"),
            "heat_valid": mge.tensor(dtype="float32"),
        }

    def calc_loss(self):
        """MSE between predicted and target heatmaps, masked by per-joint validity."""
        out = self.forward(self.inputs["image"])
        valid = self.inputs["heat_valid"][:, :, None, None]
        # NOTE(review): only the last heatmap along axis 1 is supervised —
        # presumably the full-resolution target; confirm against the data loader.
        label = self.inputs["heatmap"][:, -1]
        loss = F.square_loss(out * valid, label * valid)
        return loss

    def predict(self):
        """Run inference on the pre-loaded "image" input."""
        return self.forward(self.inputs["image"])

    def _initialize_weights(self):
        # Small-std normal init for deconvs; identity-style init for BN.
        for k, m in self.deconv_layers.named_modules():
            if isinstance(m, M.ConvTranspose2d):
                M.init.normal_(m.weight, std=0.001)
                if self.cfg.deconv_with_bias:
                    M.init.zeros_(m.bias)
            if isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
        M.init.normal_(self.last_layer.weight, std=0.001)
        M.init.zeros_(self.last_layer.bias)

    def forward(self, x):
        # Use the deepest backbone stage, then upsample to heatmap resolution.
        f = self.backbone.extract_features(x)["res5"]
        f = self.deconv_layers(f)
        pred = self.last_layer(f)
        return pred
class SimpleBaseline_Config:
    """Default hyper-parameters for the SimpleBaseline keypoint model."""

    # channels of the backbone "res5" feature fed into the first deconv
    initial_deconv_channels = 2048
    num_deconv_layers = 3
    # output channels / kernel size of each deconv stage
    deconv_channels = [256, 256, 256]
    deconv_kernel_sizes = [4, 4, 4]
    deconv_with_bias = False
    # number of predicted joints (17 — presumably COCO keypoints; TODO confirm)
    keypoint_num = 17
    backbone_pretrained = True
cfg = SimpleBaseline_Config()
@ | hub.pretrained(
"https://data.megengine.org.cn/models/weights/simplebaseline50_256x192_0_255_71_2.pkl"
) | megengine.hub.pretrained |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
    """Sequential stack of (ConvTranspose2d -> norm -> ReLU) upsampling blocks."""

    def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
        """Args:
            nf1: input channel count of the first stage.
            nf2s: per-stage output channel counts.
            kernels: per-stage deconv kernel sizes.
            num_layers: number of stages to build.
            bias: whether the deconvs carry a bias term.
            norm: normalization layer constructor applied after each deconv.
        """
        super(DeconvLayers, self).__init__()
        _body = []
        for i in range(num_layers):
            kernel = kernels[i]
            padding = (
                kernel // 3
            )  # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
            _body += [
                M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
                norm(nf2s[i]),
                M.ReLU(),
            ]
            # Next stage consumes this stage's output channels.
            nf1 = nf2s[i]
        self.body = M.Sequential(*_body)

    def forward(self, x):
        return self.body(x)
class SimpleBaseline(M.Module):
    """SimpleBaseline keypoint head: ResNet backbone -> deconv stack -> heatmaps."""

    def __init__(self, backbone, cfg):
        super(SimpleBaseline, self).__init__()
        norm = M.BatchNorm2d
        self.backbone = getattr(resnet, backbone)(
            norm=norm, pretrained=cfg.backbone_pretrained
        )
        # Classification head is unused for keypoint estimation.
        del self.backbone.fc
        self.cfg = cfg
        self.deconv_layers = DeconvLayers(
            cfg.initial_deconv_channels,
            cfg.deconv_channels,
            cfg.deconv_kernel_sizes,
            cfg.num_deconv_layers,
            cfg.deconv_with_bias,
            norm,
        )
        self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
        self._initialize_weights()
        # Placeholder tensors the driver fills before calc_loss/predict.
        self.inputs = {
            name: mge.tensor(dtype="float32")
            for name in ("image", "heatmap", "heat_valid")
        }

    def calc_loss(self):
        """Validity-masked MSE between predicted and target heatmaps."""
        pred = self.forward(self.inputs["image"])
        valid = self.inputs["heat_valid"][:, :, None, None]
        target = self.inputs["heatmap"][:, -1]
        return F.square_loss(pred * valid, target * valid)

    def predict(self):
        """Forward the pre-loaded "image" input."""
        return self.forward(self.inputs["image"])

    def _initialize_weights(self):
        # Small-std normal init for deconv weights, identity-style BN init.
        for _, m in self.deconv_layers.named_modules():
            if isinstance(m, M.ConvTranspose2d):
                M.init.normal_(m.weight, std=0.001)
                if self.cfg.deconv_with_bias:
                    M.init.zeros_(m.bias)
            if isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
        M.init.normal_(self.last_layer.weight, std=0.001)
        M.init.zeros_(self.last_layer.bias)

    def forward(self, x):
        features = self.backbone.extract_features(x)["res5"]
        upsampled = self.deconv_layers(features)
        return self.last_layer(upsampled)
class SimpleBaseline_Config:
initial_deconv_channels = 2048
num_deconv_layers = 3
deconv_channels = [256, 256, 256]
deconv_kernel_sizes = [4, 4, 4]
deconv_with_bias = False
keypoint_num = 17
backbone_pretrained = True
cfg = SimpleBaseline_Config()
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/simplebaseline50_256x192_0_255_71_2.pkl"
)
def simplebaseline_res50(**kwargs):
    """SimpleBaseline with a ResNet-50 backbone; hub decorator supplies weights."""
    model = SimpleBaseline(backbone="resnet50", cfg=cfg, **kwargs)
    return model
@ | hub.pretrained(
"https://data.megengine.org.cn/models/weights/simplebaseline101_256x192_0_255_72_2.pkl"
) | megengine.hub.pretrained |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
super(DeconvLayers, self).__init__()
_body = []
for i in range(num_layers):
kernel = kernels[i]
padding = (
kernel // 3
) # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
_body += [
M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
norm(nf2s[i]),
M.ReLU(),
]
nf1 = nf2s[i]
self.body = M.Sequential(*_body)
def forward(self, x):
return self.body(x)
class SimpleBaseline(M.Module):
def __init__(self, backbone, cfg):
super(SimpleBaseline, self).__init__()
norm = M.BatchNorm2d
self.backbone = getattr(resnet, backbone)(
norm=norm, pretrained=cfg.backbone_pretrained
)
del self.backbone.fc
self.cfg = cfg
self.deconv_layers = DeconvLayers(
cfg.initial_deconv_channels,
cfg.deconv_channels,
cfg.deconv_kernel_sizes,
cfg.num_deconv_layers,
cfg.deconv_with_bias,
norm,
)
self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
self._initialize_weights()
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": mge.tensor(dtype="float32"),
"heat_valid": mge.tensor(dtype="float32"),
}
def calc_loss(self):
out = self.forward(self.inputs["image"])
valid = self.inputs["heat_valid"][:, :, None, None]
label = self.inputs["heatmap"][:, -1]
loss = F.square_loss(out * valid, label * valid)
return loss
def predict(self):
return self.forward(self.inputs["image"])
def _initialize_weights(self):
for k, m in self.deconv_layers.named_modules():
if isinstance(m, M.ConvTranspose2d):
M.init.normal_(m.weight, std=0.001)
if self.cfg.deconv_with_bias:
M.init.zeros_(m.bias)
if isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
M.init.normal_(self.last_layer.weight, std=0.001)
M.init.zeros_(self.last_layer.bias)
def forward(self, x):
f = self.backbone.extract_features(x)["res5"]
f = self.deconv_layers(f)
pred = self.last_layer(f)
return pred
class SimpleBaseline_Config:
initial_deconv_channels = 2048
num_deconv_layers = 3
deconv_channels = [256, 256, 256]
deconv_kernel_sizes = [4, 4, 4]
deconv_with_bias = False
keypoint_num = 17
backbone_pretrained = True
cfg = SimpleBaseline_Config()
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/simplebaseline50_256x192_0_255_71_2.pkl"
)
def simplebaseline_res50(**kwargs):
    """SimpleBaseline with a ResNet-50 backbone (pretrained weights via hub)."""
    return SimpleBaseline(backbone="resnet50", cfg=cfg, **kwargs)
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/simplebaseline101_256x192_0_255_72_2.pkl"
)
def simplebaseline_res101(**kwargs):
    """SimpleBaseline with a ResNet-101 backbone (pretrained weights via hub)."""
    return SimpleBaseline(backbone="resnet101", cfg=cfg, **kwargs)
@ | hub.pretrained(
"https://data.megengine.org.cn/models/weights/simplebaseline152_256x192_0_255_72_4.pkl"
) | megengine.hub.pretrained |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
super(DeconvLayers, self).__init__()
_body = []
for i in range(num_layers):
kernel = kernels[i]
padding = (
kernel // 3
) # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
_body += [
M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
norm(nf2s[i]),
M.ReLU(),
]
nf1 = nf2s[i]
self.body = | M.Sequential(*_body) | megengine.module.Sequential |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
super(DeconvLayers, self).__init__()
_body = []
for i in range(num_layers):
kernel = kernels[i]
padding = (
kernel // 3
) # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
_body += [
M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
norm(nf2s[i]),
M.ReLU(),
]
nf1 = nf2s[i]
self.body = M.Sequential(*_body)
def forward(self, x):
return self.body(x)
class SimpleBaseline(M.Module):
def __init__(self, backbone, cfg):
super(SimpleBaseline, self).__init__()
norm = M.BatchNorm2d
self.backbone = getattr(resnet, backbone)(
norm=norm, pretrained=cfg.backbone_pretrained
)
del self.backbone.fc
self.cfg = cfg
self.deconv_layers = DeconvLayers(
cfg.initial_deconv_channels,
cfg.deconv_channels,
cfg.deconv_kernel_sizes,
cfg.num_deconv_layers,
cfg.deconv_with_bias,
norm,
)
self.last_layer = | M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
super(DeconvLayers, self).__init__()
_body = []
for i in range(num_layers):
kernel = kernels[i]
padding = (
kernel // 3
) # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
_body += [
M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
norm(nf2s[i]),
M.ReLU(),
]
nf1 = nf2s[i]
self.body = M.Sequential(*_body)
def forward(self, x):
return self.body(x)
class SimpleBaseline(M.Module):
def __init__(self, backbone, cfg):
super(SimpleBaseline, self).__init__()
norm = M.BatchNorm2d
self.backbone = getattr(resnet, backbone)(
norm=norm, pretrained=cfg.backbone_pretrained
)
del self.backbone.fc
self.cfg = cfg
self.deconv_layers = DeconvLayers(
cfg.initial_deconv_channels,
cfg.deconv_channels,
cfg.deconv_kernel_sizes,
cfg.num_deconv_layers,
cfg.deconv_with_bias,
norm,
)
self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
self._initialize_weights()
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": mge.tensor(dtype="float32"),
"heat_valid": mge.tensor(dtype="float32"),
}
def calc_loss(self):
out = self.forward(self.inputs["image"])
valid = self.inputs["heat_valid"][:, :, None, None]
label = self.inputs["heatmap"][:, -1]
loss = | F.square_loss(out * valid, label * valid) | megengine.functional.square_loss |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
super(DeconvLayers, self).__init__()
_body = []
for i in range(num_layers):
kernel = kernels[i]
padding = (
kernel // 3
) # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
_body += [
M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
norm(nf2s[i]),
M.ReLU(),
]
nf1 = nf2s[i]
self.body = M.Sequential(*_body)
def forward(self, x):
return self.body(x)
class SimpleBaseline(M.Module):
def __init__(self, backbone, cfg):
super(SimpleBaseline, self).__init__()
norm = M.BatchNorm2d
self.backbone = getattr(resnet, backbone)(
norm=norm, pretrained=cfg.backbone_pretrained
)
del self.backbone.fc
self.cfg = cfg
self.deconv_layers = DeconvLayers(
cfg.initial_deconv_channels,
cfg.deconv_channels,
cfg.deconv_kernel_sizes,
cfg.num_deconv_layers,
cfg.deconv_with_bias,
norm,
)
self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
self._initialize_weights()
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": mge.tensor(dtype="float32"),
"heat_valid": mge.tensor(dtype="float32"),
}
def calc_loss(self):
out = self.forward(self.inputs["image"])
valid = self.inputs["heat_valid"][:, :, None, None]
label = self.inputs["heatmap"][:, -1]
loss = F.square_loss(out * valid, label * valid)
return loss
def predict(self):
return self.forward(self.inputs["image"])
def _initialize_weights(self):
for k, m in self.deconv_layers.named_modules():
if isinstance(m, M.ConvTranspose2d):
M.init.normal_(m.weight, std=0.001)
if self.cfg.deconv_with_bias:
M.init.zeros_(m.bias)
if isinstance(m, M.BatchNorm2d):
M.init.ones_(m.weight)
M.init.zeros_(m.bias)
| M.init.normal_(self.last_layer.weight, std=0.001) | megengine.module.init.normal_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
    """A stack of ``num_layers`` upsampling stages, each ConvTranspose2d -> norm -> ReLU."""

    def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
        super(DeconvLayers, self).__init__()
        layers = []
        in_ch = nf1
        for idx in range(num_layers):
            k = kernels[idx]
            out_ch = nf2s[idx]
            # padding=0 when kernel=2; padding=1 when kernel=3 or kernel=4
            pad = k // 3
            layers.append(M.ConvTranspose2d(in_ch, out_ch, k, 2, pad, bias=bias))
            layers.append(norm(out_ch))
            layers.append(M.ReLU())
            in_ch = out_ch
        self.body = M.Sequential(*layers)

    def forward(self, x):
        return self.body(x)
class SimpleBaseline(M.Module):
    """Simple Baseline keypoint model: ResNet backbone, deconv upsampling head,
    and a final 3x3 conv producing one heatmap channel per keypoint."""

    def __init__(self, backbone, cfg):
        super(SimpleBaseline, self).__init__()
        norm = M.BatchNorm2d
        self.backbone = getattr(resnet, backbone)(
            norm=norm, pretrained=cfg.backbone_pretrained
        )
        # The backbone's classifier head is unused for keypoint estimation.
        del self.backbone.fc

        self.cfg = cfg

        self.deconv_layers = DeconvLayers(
            cfg.initial_deconv_channels,
            cfg.deconv_channels,
            cfg.deconv_kernel_sizes,
            cfg.num_deconv_layers,
            cfg.deconv_with_bias,
            norm,
        )
        self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
        self._initialize_weights()

        # Placeholder tensors filled in by the training/inference driver.
        self.inputs = {
            "image": mge.tensor(dtype="float32"),
            "heatmap": mge.tensor(dtype="float32"),
            "heat_valid": mge.tensor(dtype="float32"),
        }

    def calc_loss(self):
        """Validity-masked squared-error loss against the last heatmap target."""
        out = self.forward(self.inputs["image"])
        # Broadcast per-keypoint validity over spatial dims.
        valid = self.inputs["heat_valid"][:, :, None, None]
        label = self.inputs["heatmap"][:, -1]
        loss = F.square_loss(out * valid, label * valid)
        return loss

    def predict(self):
        """Forward pass on the image currently stored in ``self.inputs``."""
        return self.forward(self.inputs["image"])

    def _initialize_weights(self):
        """Normal(std=0.001) init for conv weights; BN weight=1, biases=0."""
        for _, m in self.deconv_layers.named_modules():
            if isinstance(m, M.ConvTranspose2d):
                M.init.normal_(m.weight, std=0.001)
                if self.cfg.deconv_with_bias:
                    M.init.zeros_(m.bias)
            if isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
        M.init.normal_(self.last_layer.weight, std=0.001)
        # This line was mangled into a table-row artifact in this dump; restored.
        M.init.zeros_(self.last_layer.bias)
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
    """A stack of ``num_layers`` upsampling stages, each ConvTranspose2d -> norm -> ReLU."""

    def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
        super(DeconvLayers, self).__init__()
        layers = []
        in_ch = nf1
        for idx in range(num_layers):
            k = kernels[idx]
            out_ch = nf2s[idx]
            # padding=0 when kernel=2; padding=1 when kernel=3 or kernel=4
            pad = k // 3
            layers.append(M.ConvTranspose2d(in_ch, out_ch, k, 2, pad, bias=bias))
            layers.append(norm(out_ch))
            layers.append(M.ReLU())
            in_ch = out_ch
        self.body = M.Sequential(*layers)

    def forward(self, x):
        return self.body(x)
class SimpleBaseline(M.Module):
def __init__(self, backbone, cfg):
super(SimpleBaseline, self).__init__()
norm = M.BatchNorm2d
self.backbone = getattr(resnet, backbone)(
norm=norm, pretrained=cfg.backbone_pretrained
)
del self.backbone.fc
self.cfg = cfg
self.deconv_layers = DeconvLayers(
cfg.initial_deconv_channels,
cfg.deconv_channels,
cfg.deconv_kernel_sizes,
cfg.num_deconv_layers,
cfg.deconv_with_bias,
norm,
)
self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
self._initialize_weights()
self.inputs = {
"image": | mge.tensor(dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
    """A stack of ``num_layers`` upsampling stages, each ConvTranspose2d -> norm -> ReLU."""

    def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
        super(DeconvLayers, self).__init__()
        layers = []
        in_ch = nf1
        for idx in range(num_layers):
            k = kernels[idx]
            out_ch = nf2s[idx]
            # padding=0 when kernel=2; padding=1 when kernel=3 or kernel=4
            pad = k // 3
            layers.append(M.ConvTranspose2d(in_ch, out_ch, k, 2, pad, bias=bias))
            layers.append(norm(out_ch))
            layers.append(M.ReLU())
            in_ch = out_ch
        self.body = M.Sequential(*layers)

    def forward(self, x):
        return self.body(x)
class SimpleBaseline(M.Module):
def __init__(self, backbone, cfg):
super(SimpleBaseline, self).__init__()
norm = M.BatchNorm2d
self.backbone = getattr(resnet, backbone)(
norm=norm, pretrained=cfg.backbone_pretrained
)
del self.backbone.fc
self.cfg = cfg
self.deconv_layers = DeconvLayers(
cfg.initial_deconv_channels,
cfg.deconv_channels,
cfg.deconv_kernel_sizes,
cfg.num_deconv_layers,
cfg.deconv_with_bias,
norm,
)
self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
self._initialize_weights()
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": | mge.tensor(dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
    """A stack of ``num_layers`` upsampling stages, each ConvTranspose2d -> norm -> ReLU."""

    def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
        super(DeconvLayers, self).__init__()
        layers = []
        in_ch = nf1
        for idx in range(num_layers):
            k = kernels[idx]
            out_ch = nf2s[idx]
            # padding=0 when kernel=2; padding=1 when kernel=3 or kernel=4
            pad = k // 3
            layers.append(M.ConvTranspose2d(in_ch, out_ch, k, 2, pad, bias=bias))
            layers.append(norm(out_ch))
            layers.append(M.ReLU())
            in_ch = out_ch
        self.body = M.Sequential(*layers)

    def forward(self, x):
        return self.body(x)
class SimpleBaseline(M.Module):
def __init__(self, backbone, cfg):
super(SimpleBaseline, self).__init__()
norm = M.BatchNorm2d
self.backbone = getattr(resnet, backbone)(
norm=norm, pretrained=cfg.backbone_pretrained
)
del self.backbone.fc
self.cfg = cfg
self.deconv_layers = DeconvLayers(
cfg.initial_deconv_channels,
cfg.deconv_channels,
cfg.deconv_kernel_sizes,
cfg.num_deconv_layers,
cfg.deconv_with_bias,
norm,
)
self.last_layer = M.Conv2d(cfg.deconv_channels[-1], cfg.keypoint_num, 3, 1, 1)
self._initialize_weights()
self.inputs = {
"image": mge.tensor(dtype="float32"),
"heatmap": mge.tensor(dtype="float32"),
"heat_valid": | mge.tensor(dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
super(DeconvLayers, self).__init__()
_body = []
for i in range(num_layers):
kernel = kernels[i]
padding = (
kernel // 3
) # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
_body += [
| M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias) | megengine.module.ConvTranspose2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import official.vision.classification.resnet.model as resnet
import numpy as np
class DeconvLayers(M.Module):
def __init__(self, nf1, nf2s, kernels, num_layers, bias=True, norm=M.BatchNorm2d):
super(DeconvLayers, self).__init__()
_body = []
for i in range(num_layers):
kernel = kernels[i]
padding = (
kernel // 3
) # padding=0 when kernel=2 and padding=1 when kernel=4 or kernel=3
_body += [
M.ConvTranspose2d(nf1, nf2s[i], kernel, 2, padding, bias=bias),
norm(nf2s[i]),
| M.ReLU() | megengine.module.ReLU |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.