# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import os
import time
import pickle
import numpy as np
import megengine.amp as amp
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import SGD
import megengine.jit as jit
from tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--arch", type=str, help="model architecture",
)
parser.add_argument(
"-n", "--ngpus", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch-size", default=2, type=int, help="batchsize for training",
)
parser.add_argument(
"-s", "--steps", default=100, type=int, help="number of train steps (default: 100)",
)
parser.add_argument(
"--trace",
action='store_true',
default=False,
help="whether use trace or not (default: False)",
)
parser.add_argument(
"--preloader",
action='store_true',
default=False,
help="whether use preloader or not (default: False)",
)
parser.add_argument(
"--symbolic",
action='store_true',
default=False,
help="whether use symbolic trace or not (default: False)",
)
parser.add_argument(
"-d", "--loader", default=False, action="store_true", help="use pseudo detection dataset loader",
)
parser.add_argument(
"-p", "--print-freq", default=1, type=int, help="print frequency (default: 1)",
)
parser.add_argument(
"-m",
"--mode",
default="normal",
type=str,
choices=["normal", "mp"],
help="Quantization Mode\n"
"normal: no quantization, using float32\n"
"mp: input type is fp16",
)
parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
return parser
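# Example invocation (the script file name is illustrative, not defined here):
#   python3 benchmark_train.py -a retinanet -n 1 -b 2 -s 100 --trace --symbolic -m mp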
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
if args.ngpus > 1:
train_func = dist.launcher(worker, n_gpus=args.ngpus)
train_func(args)
else:
worker(args)
def worker(args):
config_file = {
"faster_rcnn": "configs/faster_rcnn_res50_coco_1x_800size.py",
"atss": "configs/atss_res50_coco_1x_800size.py",
"retinanet": "configs/retinanet_res50_coco_1x_800size.py",
}[args.arch]
current_network = import_from_file(config_file)
model = current_network.Net(current_network.Cfg())
model.train()
# if dist.get_rank() == 0:
# logger.info(get_config_info(model.cfg))
# logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size,
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay * dist.get_world_size(),
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters(), dist.WORLD) # sync parameters
train_loader = None
for epoch in range(1):
train_one_epoch(model, train_loader, opt, gm, epoch, args)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
@amp.autocast(enabled=args.mode == "mp")
def train_func(image, im_info, gt_boxes):
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
opt.step().clear_grad()
return loss_list
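    # train_func runs in one of three modes: plain imperative execution, a non-symbolic jit.trace,
    # or a symbolic trace with symbolic shapes plus sublinear-memory recomputation enabled via
    # SublinearMemoryConfig; --symbolic is only meaningful together with --trace.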
if args.trace:
if args.symbolic:
train_func = jit.trace(train_func, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=True)
else:
train_func = jit.trace(train_func, symbolic=False, symbolic_shape=False)
else:
        assert not args.symbolic, "invalid arguments: trace=False, symbolic=True"
loss_meter = AverageMeter(record_len=model.cfg.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
file_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(file_dir, 'batch.pkl') # batch_size for batch.pkl is 2
mini_batch = pickle.load(open(data_path, "rb"))
if args.batch_size != 2:
repeats = (args.batch_size+1) // 2
mini_batch['data'] = np.concatenate([mini_batch['data'],]*repeats)[:args.batch_size]
mini_batch['im_info'] = np.concatenate([mini_batch['im_info'],]*repeats)[:args.batch_size]
mini_batch['gt_boxes'] = np.concatenate([mini_batch['gt_boxes'],]*repeats)[:args.batch_size]
# warm up
for step in range(10):
if data_queue:
mini_batch = next(data_queue)
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
_ = [loss.numpy() for loss in loss_list]
for step in range(args.steps):
tik = time.time()
if data_queue:
mini_batch = next(data_queue)
data_tok = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"])
)
loss_meter.update([loss.numpy().item() for loss in loss_list])
tok = time.time()
time_meter.update([tok - tik, data_tok - tik])
if step % args.print_freq == 0 and dist.get_rank() == 0:
print(
"Step {}, Loss ({}), Time (tot:{:.3f}, data:{:.3f})".format(
step,
"".join(["{:.3f} ".format(t) for t in loss_meter.average()]),
*time_meter.average(),
))
loss_meter.reset()
if dist.get_rank() == 0:
print("="*20, "summary", "="*20)
print(" benchmark: detection")
if args.trace:
print(" mode: trace(symbolic={})".format("True, sublinear=True" if args.symbolic else "False"))
else:
print(" mode: imperative")
print(" loader: {}".format("" if not args.loader else "--loader"))
if args.loader:
print(" preload: {}".format("" if not args.preload else "--preload"))
print(" arch: {}".format(args.arch))
print("train_mode: {}".format(args.mode))
print(" batchsize: {}".format(args.batch_size))
print(" #GPU: {}".format(args.ngpus))
print(" avg time: {:.3f} seconds".format(time_meter.average()[0]))
# pylint: disable=unused-argument
def build_dataset(dataset_dir, cfg):
return PseudoDetectionDataset(order=["image", "boxes", "boxes_category", "info"])
# pylint: disable=dangerous-default-value
def build_sampler(train_dataset, batch_size, aspect_grouping=[1]):
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
info = dataset.get_img_info(i)
aspect_ratios.append(info["height"] / info["width"])
return aspect_ratios
def _quantize(x, bins):
return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))
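    # With the default aspect_grouping=[1], _quantize assigns each image to one of two groups
    # (aspect ratio below vs. at/above 1) so GroupedRandomSampler batches similarly shaped images
    # and keeps padding overhead small.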
if len(aspect_grouping) == 0:
return Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
aspect_ratios = _compute_aspect_ratios(train_dataset)
group_ids = _quantize(aspect_ratios, aspect_grouping)
return Infinite(GroupedRandomSampler(train_dataset, batch_size, group_ids))
def build_dataloader(batch_size, dataset_dir, cfg, preloader= False):
train_dataset = build_dataset(dataset_dir, cfg)
train_sampler = build_sampler(train_dataset, batch_size)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.ShortestEdgeResize(
cfg.train_image_short_size,
cfg.train_image_max_size,
sample_style="choice",
),
T.RandomHorizontalFlip(),
                T.ToMode(),
            ],
            # `order` and the arguments below are reconstructed from the imports and the function
            # signature; the original tooling may pass additional options (e.g. num_workers).
            order=["image", "boxes", "boxes_category", "info"],
        ),
        collator=DetectionPadCollator(),
    )
    return train_dataloader
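# Minimal usage sketch (the dataset path is an assumption, not part of this script):
#   queue = iter(build_dataloader(2, "/path/to/coco", model.cfg))
#   mini_batch = next(queue)  # the Infinite sampler makes the loader an endless stream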
import logging
from megengine.distributed.group import get_rank
from megengine.distributed import is_distributed
logger_initialized = {}
def get_logger(name, log_file=None, log_level=logging.INFO):
"""Initialize and get a logger by name.
If the logger has not been initialized, this method will initialize the
logger by adding one or two handlers, otherwise the initialized logger will
be directly returned. During initialization, a StreamHandler will always be
added. If `log_file` is specified and the process rank is 0, a FileHandler
will also be added.
Args:
name (str): Logger name.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the logger.
log_level (int): The logger level. Note that only the process of
rank 0 is affected, and other processes will set the level to
"Error" thus be silent most of the time.
Returns:
logging.Logger: The expected logger.
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
# handle hierarchical names
# e.g., logger "a" is initialized, then logger "a.b" will skip the
# initialization since it is a child of "a".
for logger_name in logger_initialized:
if name.startswith(logger_name): # child
return logger
# fix stream twice bug
# while logger.handlers:
# logger.handlers.pop()
stream_handler = logging.StreamHandler()
handlers = [stream_handler]
if is_distributed():
rank = get_rank()
else:
rank = 0
# only rank 0 will add a FileHandler
if rank == 0 and log_file is not None:
file_handler = logging.FileHandler(log_file, 'w')
handlers.append(file_handler)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
for handler in handlers:
handler.setFormatter(formatter)
handler.setLevel(log_level)
logger.addHandler(handler)
if rank == 0:
logger.setLevel(log_level)
else:
logger.setLevel(logging.ERROR)
logger_initialized[name] = True
return logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized.
By default a StreamHandler will be added.
If `log_file` is specified, a FileHandler will also be added.
The name of the root logger is the top-level package name, e.g., "edit".
Args:
log_file (str | None): The log filename. If specified, a FileHandler will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
root_name = __name__.split('.')[0] # edit.utils.logger
if is_distributed():
        rank = get_rank()
    else:
        rank = 0
    # reconstructed tail: only rank 0 gets the file handler (get_logger enforces this as well)
    logger = get_logger(root_name, log_file if rank == 0 else None, log_level)
    return logger
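# Usage sketch (the file name is illustrative): on rank 0 the logger emits at `log_level` and writes
# the file; on other ranks it is raised to ERROR and stays mostly silent.
#   logger = get_root_logger(log_file="train.log")
#   logger.info("visible on rank 0 only")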
import argparse
import logging
import os
import dataset.data_loader as data_loader
import model.net as net
from common import utils
from loss.losses import compute_losses, compute_metrics
from common.manager import Manager
import megengine.distributed as dist
import megengine.functional as F
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", default="experiments/base_model", help="Directory containing params.json")
parser.add_argument("--restore_file", default="best", help="name of the file in --model_dir containing weights to load")
def evaluate(model, manager):
    """Evaluate the model on the validation and test dataloaders.

    Args:
        model: (megengine.module.Module) the neural network
        manager: a class instance that contains objects related to train and evaluate.
    """
    rank = dist.get_rank()
    world_size = dist.get_world_size()
# set model to evaluation mode
model.eval()
# compute metrics over the dataset
if manager.dataloaders["val"] is not None:
        # initialize loss and validation metric status
manager.reset_loss_status()
manager.reset_metric_status("val")
for data_batch in manager.dataloaders["val"]:
# compute the real batch size
bs = data_batch["points_src"].shape[0]
# move to GPU if available
data_batch = utils.tensor_mge(data_batch)
# compute model output
output_batch = model(data_batch)
# compute all loss on this batch
loss = compute_losses(output_batch, manager.params)
metrics = compute_metrics(output_batch, manager.params)
if world_size > 1:
for k, v in loss.items():
loss[k] = F.distributed.all_reduce_sum(v) / world_size
for k, v in metrics.items():
metrics[k] = F.distributed.all_reduce_sum(v) / world_size
manager.update_loss_status(loss, "val", bs)
# compute all metrics on this batch
manager.update_metric_status(metrics, "val", bs)
# update val data to tensorboard
if rank == 0:
# compute RMSE metrics
manager.summarize_metric_status(metrics, "val")
manager.writer.add_scalar("Loss/val", manager.loss_status["total"].avg, manager.epoch)
# manager.logger.info("Loss/valid epoch {}: {:.4f}".format(manager.epoch, manager.loss_status["total"].avg))
for k, v in manager.val_status.items():
manager.writer.add_scalar("Metric/val/{}".format(k), v.avg, manager.epoch)
# For each epoch, print the metric
manager.print_metrics("val", title="Val", color="green")
if manager.dataloaders["test"] is not None:
        # initialize loss and test metric status
manager.reset_loss_status()
manager.reset_metric_status("test")
for data_batch in manager.dataloaders["test"]:
# compute the real batch size
bs = data_batch["points_src"].shape[0]
# move to GPU if available
data_batch = utils.tensor_mge(data_batch)
# compute model output
output_batch = model(data_batch)
# compute all loss on this batch
loss = compute_losses(output_batch, manager.params)
metrics = compute_metrics(output_batch, manager.params)
if world_size > 1:
for k, v in loss.items():
loss[k] = F.distributed.all_reduce_sum(v) / world_size
for k, v in metrics.items():
                    metrics[k] = F.distributed.all_reduce_sum(v) / world_size
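                # all_reduce_sum adds each value across ranks, so dividing by world_size gives the
                # mean over all processes; the rest of the test branch presumably mirrors the
                # validation branch above (status updates, then rank-0 summary and printing).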
import sys
sys.path.append('.')
import cv2
import megengine as mge
import megengine.functional as F
import numpy as np
from model.RIFE import Model
model = Model()
model.load_model('train_log')
model.eval()
name = ['Beanbags', 'Dimetrodon', 'DogDance', 'Grove2', 'Grove3', 'Hydrangea', 'MiniCooper', 'RubberWhale', 'Urban2', 'Urban3', 'Venus', 'Walking']
IE_list = []
for i in name:
i0 = cv2.imread('other-data/{}/frame10.png'.format(i)).transpose(2, 0, 1) / 255.
i1 = cv2.imread('other-data/{}/frame11.png'.format(i)).transpose(2, 0, 1) / 255.
gt = cv2.imread('other-gt-interp/{}/frame10i11.png'.format(i))
h, w = i0.shape[1], i0.shape[2]
imgs = F.zeros([1, 6, 480, 640])
ph = (480 - h) // 2
pw = (640 - w) // 2
imgs[:, :3, :h, :w] = F.expand_dims(mge.Tensor(i0), 0).astype("float32")
    imgs[:, 3:, :h, :w] = F.expand_dims(mge.Tensor(i1), 0).astype("float32")
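    # ph/pw are the spare vertical/horizontal margins of an h x w frame inside the fixed
    # 480x640 canvas; the prediction is presumably cropped back to h x w with them before the
    # per-sequence interpolation error is appended to IE_list.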
# Copyright (c) 2014-2022 Megvii Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import megengine
import megengine.functional as F
import megengine.module as nn
import numpy as np
from basecls.layers import DropPath, init_weights
from basecls.utils import registers
def _fuse_prebn_conv1x1(bn, conv):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
assert conv.groups == 1
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(1, -1, 1, 1)
module_output.weight[:] = kernel * t
module_output.bias[:] = F.conv2d(beta - running_mean * gamma / std, kernel, conv.bias)
return module_output
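# _fuse_prebn_conv1x1: the BatchNorm sits *before* the 1x1 conv, so its per-channel scale gamma/std
# multiplies the kernel along the input-channel axis (hence the (1, -1, 1, 1) reshape), while its
# constant shift (beta - running_mean * gamma / std) passes through the conv and becomes an extra
# bias, computed above by applying F.conv2d to that constant.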
def _fuse_conv_bn(conv, bn):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
# flatten then reshape in case of group conv
kernel = F.flatten(conv.weight, end_axis=conv.weight.ndim - 4)
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(-1, 1, 1, 1)
module_output.weight[:] = (kernel * t).reshape(module_output.weight.shape)
module_output.bias[:] = beta + ((conv.bias if conv.bias is not None else 0) - running_mean) * gamma / std
return module_output
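# _fuse_conv_bn: BN(W*x + b) = (gamma/std) * (W*x + b - running_mean) + beta, so each output-channel
# slice of the kernel is scaled by t = gamma/std and the fused bias becomes
# beta + (b - running_mean) * gamma / std, exactly what the two assignments above compute.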
class ConvBn2d(nn.ConvBn2d):
def __init__(self, *args, **kwargs):
        bias = kwargs.pop("bias", False) and False  # conv bias is always disabled; the BN affine term provides the shift
super().__init__(*args, bias=bias, **kwargs)
@classmethod
def fuse_conv_bn(cls, module: nn.Module):
module_output = module
if isinstance(module, ConvBn2d):
return _fuse_conv_bn(module.conv, module.bn)
for name, child in module.named_children():
setattr(module_output, name, cls.fuse_conv_bn(child))
del module
return module_output
class LargeKernelReparam(nn.Module):
def __init__(self, channels, kernel, small_kernels=()):
super(LargeKernelReparam, self).__init__()
self.dw_large = ConvBn2d(channels, channels, kernel, padding=kernel // 2, groups=channels)
self.small_kernels = small_kernels
for k in self.small_kernels:
setattr(self, f"dw_small_{k}", ConvBn2d(channels, channels, k, padding=k // 2, groups=channels))
def forward(self, inp):
outp = self.dw_large(inp)
for k in self.small_kernels:
outp += getattr(self, f"dw_small_{k}")(inp)
return outp
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, LargeKernelReparam):
module = ConvBn2d.fuse_conv_bn(module)
module_output = copy.deepcopy(module.dw_large)
kernel = module_output.kernel_size[0]
for k in module.small_kernels:
dw_small = getattr(module, f"dw_small_{k}")
module_output.weight += F.pad(dw_small.weight, [[0, 0]] * 3 + [[(kernel - k) // 2] * 2] * 2)
module_output.bias += dw_small.bias
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
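# LargeKernelReparam deploy conversion: after fusing every ConvBn2d branch, each small k x k depthwise
# kernel is zero-padded with F.pad up to the large kernel size and added to it (biases are summed too),
# so a single depthwise conv reproduces the multi-branch sum used during training.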
class Mlp(nn.Module):
def __init__(self, in_channels, hidden_channels=None, out_channels=None, act_layer=nn.GELU, drop=0.,):
super().__init__()
out_features = out_channels or in_channels
hidden_features = hidden_channels or in_channels
self.fc1 = ConvBn2d(in_channels, hidden_features, 1, stride=1, padding=0)
self.act = act_layer()
self.fc2 = ConvBn2d(hidden_features, out_features, 1, stride=1, padding=0)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepLKBlock(nn.Module):
def __init__(self, channels, kernel, small_kernels=(), dw_ratio=1.0, mlp_ratio=4.0, drop_path=0., activation=nn.ReLU):
super().__init__()
self.pre_bn = nn.BatchNorm2d(channels)
self.pw1 = ConvBn2d(channels, int(channels * dw_ratio), 1, 1, 0)
self.pw1_act = activation()
self.dw = LargeKernelReparam(int(channels * dw_ratio), kernel, small_kernels=small_kernels)
self.dw_act = activation()
self.pw2 = ConvBn2d(int(channels * dw_ratio), channels, 1, 1, 0)
self.premlp_bn = nn.BatchNorm2d(channels)
self.mlp = Mlp(in_channels=channels, hidden_channels=int(channels * mlp_ratio))
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
y = self.pre_bn(x)
y = self.pw1_act(self.pw1(y))
y = self.dw_act(self.dw(y))
y = self.pw2(y)
x = x + self.drop_path(y)
y = self.premlp_bn(x)
y = self.mlp(y)
x = x + self.drop_path(y)
return x
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, RepLKBlock):
LargeKernelReparam.convert_to_deploy(module)
ConvBn2d.fuse_conv_bn(module)
module.pre_bn, module.pw1 = nn.Identity(), _fuse_prebn_conv1x1(module.pre_bn, module.pw1)
module.premlp_bn, module.mlp.fc1 = nn.Identity(), _fuse_prebn_conv1x1(module.premlp_bn, module.mlp.fc1)
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class DownSample(nn.Sequential):
def __init__(self, in_channels, out_channels, activation=nn.ReLU):
super().__init__(
ConvBn2d(in_channels, out_channels, 1),
activation(),
ConvBn2d(out_channels, out_channels, 3, stride=2, padding=1, groups=out_channels),
activation(),
)
class Stem(nn.Sequential):
def __init__(self, in_channels, out_channels, activation=nn.ReLU):
super().__init__(
ConvBn2d(in_channels, out_channels, 3, stride=2, padding=1),
activation(),
ConvBn2d(out_channels, out_channels, 3, padding=1, groups=out_channels),
activation(),
ConvBn2d(out_channels, out_channels, 1),
activation(),
ConvBn2d(out_channels, out_channels, 3, stride=2, padding=1, groups=out_channels),
activation(),
)
class RepLKNet(nn.Module):
def __init__(
self,
in_channels=3,
depths=(2, 2, 18, 2),
dims=(128, 256, 512, 1024),
kernel_sizes=(31, 29, 27, 13),
small_kernels=(5,),
dw_ratio=1.0,
mlp_ratio=4.0,
num_classes=1000,
drop_path_rate=0.5,
):
super().__init__()
self.stem = Stem(in_channels, dims[0])
# stochastic depth
dpr = (x for x in np.linspace(0, drop_path_rate, sum(depths))) # stochastic depth decay rule
self.blocks = []
for stage, (depth, dim, ksize) in enumerate(zip(depths, dims, kernel_sizes)):
for _ in range(depth):
self.blocks.append(
RepLKBlock(dim, ksize, small_kernels=small_kernels,
dw_ratio=dw_ratio, mlp_ratio=mlp_ratio, drop_path=next(dpr))
)
if stage < len(depths) - 1:
self.blocks.append(DownSample(dim, dims[stage + 1]))
self.norm = nn.BatchNorm2d(dims[-1])
self.avgpool = | nn.AdaptiveAvgPool2d(1) | megengine.module.AdaptiveAvgPool2d |
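# Illustrative sanity check, not one of the dataset rows: it verifies the conv+BN
# folding performed by _fuse_conv_bn above, i.e. w' = w * gamma/std per output
# channel and b' = beta + (b - mean) * gamma/std. Shapes and values are made up
# for the demo; it assumes megengine and numpy are installed.
import numpy as np
import megengine as mge
import megengine.functional as F

rng = np.random.default_rng(0)
x = mge.tensor(rng.standard_normal((1, 4, 8, 8)).astype("float32"))
w = mge.tensor(rng.standard_normal((6, 4, 3, 3)).astype("float32"))
b = mge.tensor(rng.standard_normal((6,)).astype("float32"))
gamma = mge.tensor(rng.standard_normal((6,)).astype("float32"))
beta = mge.tensor(rng.standard_normal((6,)).astype("float32"))
mean = mge.tensor(rng.standard_normal((6,)).astype("float32"))
var = mge.tensor(rng.random((6,)).astype("float32"))
eps = 1e-5
std = F.sqrt(var + eps)

# reference: convolution followed by the batch-norm affine transform
y_ref = F.conv2d(x, w, b.reshape(1, -1, 1, 1))
y_ref = (y_ref - mean.reshape(1, -1, 1, 1)) / std.reshape(1, -1, 1, 1)
y_ref = y_ref * gamma.reshape(1, -1, 1, 1) + beta.reshape(1, -1, 1, 1)

# fused: scale the kernel per output channel and fold the BN statistics into the bias
w_fused = w * (gamma / std).reshape(-1, 1, 1, 1)
b_fused = beta + (b - mean) * gamma / std
y_fused = F.conv2d(x, w_fused, b_fused.reshape(1, -1, 1, 1))
print(np.allclose(y_ref.numpy(), y_fused.numpy(), atol=1e-4))  # expected: True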
# Copyright (c) 2014-2022 Megvii Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import megengine
import megengine.functional as F
import megengine.module as nn
import numpy as np
from basecls.layers import DropPath, init_weights
from basecls.utils import registers
def _fuse_prebn_conv1x1(bn, conv):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
assert conv.groups == 1
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(1, -1, 1, 1)
module_output.weight[:] = kernel * t
module_output.bias[:] = F.conv2d(beta - running_mean * gamma / std, kernel, conv.bias)
return module_output
def _fuse_conv_bn(conv, bn):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
# flatten then reshape in case of group conv
kernel = F.flatten(conv.weight, end_axis=conv.weight.ndim - 4)
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(-1, 1, 1, 1)
module_output.weight[:] = (kernel * t).reshape(module_output.weight.shape)
module_output.bias[:] = beta + ((conv.bias if conv.bias is not None else 0) - running_mean) * gamma / std
return module_output
class ConvBn2d(nn.ConvBn2d):
def __init__(self, *args, **kwargs):
bias = kwargs.pop("bias", False) and False
super().__init__(*args, bias=bias, **kwargs)
@classmethod
def fuse_conv_bn(cls, module: nn.Module):
module_output = module
if isinstance(module, ConvBn2d):
return _fuse_conv_bn(module.conv, module.bn)
for name, child in module.named_children():
setattr(module_output, name, cls.fuse_conv_bn(child))
del module
return module_output
class LargeKernelReparam(nn.Module):
def __init__(self, channels, kernel, small_kernels=()):
super(LargeKernelReparam, self).__init__()
self.dw_large = ConvBn2d(channels, channels, kernel, padding=kernel // 2, groups=channels)
self.small_kernels = small_kernels
for k in self.small_kernels:
setattr(self, f"dw_small_{k}", ConvBn2d(channels, channels, k, padding=k // 2, groups=channels))
def forward(self, inp):
outp = self.dw_large(inp)
for k in self.small_kernels:
outp += getattr(self, f"dw_small_{k}")(inp)
return outp
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, LargeKernelReparam):
module = ConvBn2d.fuse_conv_bn(module)
module_output = copy.deepcopy(module.dw_large)
kernel = module_output.kernel_size[0]
for k in module.small_kernels:
dw_small = getattr(module, f"dw_small_{k}")
module_output.weight += F.pad(dw_small.weight, [[0, 0]] * 3 + [[(kernel - k) // 2] * 2] * 2)
module_output.bias += dw_small.bias
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class Mlp(nn.Module):
def __init__(self, in_channels, hidden_channels=None, out_channels=None, act_layer=nn.GELU, drop=0.,):
super().__init__()
out_features = out_channels or in_channels
hidden_features = hidden_channels or in_channels
self.fc1 = ConvBn2d(in_channels, hidden_features, 1, stride=1, padding=0)
self.act = act_layer()
self.fc2 = ConvBn2d(hidden_features, out_features, 1, stride=1, padding=0)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepLKBlock(nn.Module):
def __init__(self, channels, kernel, small_kernels=(), dw_ratio=1.0, mlp_ratio=4.0, drop_path=0., activation=nn.ReLU):
super().__init__()
self.pre_bn = nn.BatchNorm2d(channels)
self.pw1 = ConvBn2d(channels, int(channels * dw_ratio), 1, 1, 0)
self.pw1_act = activation()
self.dw = LargeKernelReparam(int(channels * dw_ratio), kernel, small_kernels=small_kernels)
self.dw_act = activation()
self.pw2 = ConvBn2d(int(channels * dw_ratio), channels, 1, 1, 0)
self.premlp_bn = nn.BatchNorm2d(channels)
self.mlp = Mlp(in_channels=channels, hidden_channels=int(channels * mlp_ratio))
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
y = self.pre_bn(x)
y = self.pw1_act(self.pw1(y))
y = self.dw_act(self.dw(y))
y = self.pw2(y)
x = x + self.drop_path(y)
y = self.premlp_bn(x)
y = self.mlp(y)
x = x + self.drop_path(y)
return x
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, RepLKBlock):
LargeKernelReparam.convert_to_deploy(module)
ConvBn2d.fuse_conv_bn(module)
module.pre_bn, module.pw1 = nn.Identity(), _fuse_prebn_conv1x1(module.pre_bn, module.pw1)
module.premlp_bn, module.mlp.fc1 = nn.Identity(), _fuse_prebn_conv1x1(module.premlp_bn, module.mlp.fc1)
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class DownSample(nn.Sequential):
def __init__(self, in_channels, out_channels, activation=nn.ReLU):
super().__init__(
ConvBn2d(in_channels, out_channels, 1),
activation(),
ConvBn2d(out_channels, out_channels, 3, stride=2, padding=1, groups=out_channels),
activation(),
)
class Stem(nn.Sequential):
def __init__(self, in_channels, out_channels, activation=nn.ReLU):
super().__init__(
ConvBn2d(in_channels, out_channels, 3, stride=2, padding=1),
activation(),
ConvBn2d(out_channels, out_channels, 3, padding=1, groups=out_channels),
activation(),
ConvBn2d(out_channels, out_channels, 1),
activation(),
ConvBn2d(out_channels, out_channels, 3, stride=2, padding=1, groups=out_channels),
activation(),
)
class RepLKNet(nn.Module):
def __init__(
self,
in_channels=3,
depths=(2, 2, 18, 2),
dims=(128, 256, 512, 1024),
kernel_sizes=(31, 29, 27, 13),
small_kernels=(5,),
dw_ratio=1.0,
mlp_ratio=4.0,
num_classes=1000,
drop_path_rate=0.5,
):
super().__init__()
self.stem = Stem(in_channels, dims[0])
# stochastic depth
dpr = (x for x in np.linspace(0, drop_path_rate, sum(depths))) # stochastic depth decay rule
self.blocks = []
for stage, (depth, dim, ksize) in enumerate(zip(depths, dims, kernel_sizes)):
for _ in range(depth):
self.blocks.append(
RepLKBlock(dim, ksize, small_kernels=small_kernels,
dw_ratio=dw_ratio, mlp_ratio=mlp_ratio, drop_path=next(dpr))
)
if stage < len(depths) - 1:
self.blocks.append(DownSample(dim, dims[stage + 1]))
self.norm = nn.BatchNorm2d(dims[-1])
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.head = nn.Linear(dims[-1], num_classes) if num_classes > 0 else nn.Identity()
init_weights(self)
def forward_features(self, x):
x = self.stem(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
x = self.avgpool(x)
x = | F.flatten(x, 1) | megengine.functional.flatten |
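# Illustrative sketch, not one of the dataset rows: the "stochastic depth decay
# rule" in RepLKNet.__init__ above assigns each RepLKBlock a linearly increasing
# drop-path rate from 0 to drop_path_rate across all blocks. The numbers below
# just mirror the default configuration of the rows above.
import numpy as np
depths, drop_path_rate = (2, 2, 18, 2), 0.5
rates = np.linspace(0, drop_path_rate, sum(depths))
start = 0
for stage, d in enumerate(depths):
    # each stage consumes the next `d` rates from the schedule
    print(f"stage {stage}: {np.round(rates[start:start + d], 3)}")
    start += d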
# Copyright (c) 2014-2022 Megvii Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import megengine
import megengine.functional as F
import megengine.module as nn
import numpy as np
from basecls.layers import DropPath, init_weights
from basecls.utils import registers
def _fuse_prebn_conv1x1(bn, conv):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
assert conv.groups == 1
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(1, -1, 1, 1)
module_output.weight[:] = kernel * t
module_output.bias[:] = F.conv2d(beta - running_mean * gamma / std, kernel, conv.bias)
return module_output
def _fuse_conv_bn(conv, bn):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
# flatten then reshape in case of group conv
kernel = F.flatten(conv.weight, end_axis=conv.weight.ndim - 4)
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(-1, 1, 1, 1)
module_output.weight[:] = (kernel * t).reshape(module_output.weight.shape)
module_output.bias[:] = beta + ((conv.bias if conv.bias is not None else 0) - running_mean) * gamma / std
return module_output
class ConvBn2d(nn.ConvBn2d):
def __init__(self, *args, **kwargs):
bias = kwargs.pop("bias", False) and False
super().__init__(*args, bias=bias, **kwargs)
@classmethod
def fuse_conv_bn(cls, module: nn.Module):
module_output = module
if isinstance(module, ConvBn2d):
return _fuse_conv_bn(module.conv, module.bn)
for name, child in module.named_children():
setattr(module_output, name, cls.fuse_conv_bn(child))
del module
return module_output
class LargeKernelReparam(nn.Module):
def __init__(self, channels, kernel, small_kernels=()):
super(LargeKernelReparam, self).__init__()
self.dw_large = ConvBn2d(channels, channels, kernel, padding=kernel // 2, groups=channels)
self.small_kernels = small_kernels
for k in self.small_kernels:
setattr(self, f"dw_small_{k}", ConvBn2d(channels, channels, k, padding=k // 2, groups=channels))
def forward(self, inp):
outp = self.dw_large(inp)
for k in self.small_kernels:
outp += getattr(self, f"dw_small_{k}")(inp)
return outp
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, LargeKernelReparam):
module = ConvBn2d.fuse_conv_bn(module)
module_output = copy.deepcopy(module.dw_large)
kernel = module_output.kernel_size[0]
for k in module.small_kernels:
dw_small = getattr(module, f"dw_small_{k}")
module_output.weight += F.pad(dw_small.weight, [[0, 0]] * 3 + [[(kernel - k) // 2] * 2] * 2)
module_output.bias += dw_small.bias
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class Mlp(nn.Module):
def __init__(self, in_channels, hidden_channels=None, out_channels=None, act_layer=nn.GELU, drop=0.,):
super().__init__()
out_features = out_channels or in_channels
hidden_features = hidden_channels or in_channels
self.fc1 = ConvBn2d(in_channels, hidden_features, 1, stride=1, padding=0)
self.act = act_layer()
self.fc2 = ConvBn2d(hidden_features, out_features, 1, stride=1, padding=0)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepLKBlock(nn.Module):
def __init__(self, channels, kernel, small_kernels=(), dw_ratio=1.0, mlp_ratio=4.0, drop_path=0., activation=nn.ReLU):
super().__init__()
self.pre_bn = nn.BatchNorm2d(channels)
self.pw1 = ConvBn2d(channels, int(channels * dw_ratio), 1, 1, 0)
self.pw1_act = activation()
self.dw = LargeKernelReparam(int(channels * dw_ratio), kernel, small_kernels=small_kernels)
self.dw_act = activation()
self.pw2 = ConvBn2d(int(channels * dw_ratio), channels, 1, 1, 0)
self.premlp_bn = nn.BatchNorm2d(channels)
self.mlp = Mlp(in_channels=channels, hidden_channels=int(channels * mlp_ratio))
self.drop_path = DropPath(drop_path) if drop_path > 0. else | nn.Identity() | megengine.module.Identity |
# Copyright (c) 2014-2022 Megvii Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import megengine
import megengine.functional as F
import megengine.module as nn
import numpy as np
from basecls.layers import DropPath, init_weights
from basecls.utils import registers
def _fuse_prebn_conv1x1(bn, conv):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
assert conv.groups == 1
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(1, -1, 1, 1)
module_output.weight[:] = kernel * t
module_output.bias[:] = F.conv2d(beta - running_mean * gamma / std, kernel, conv.bias)
return module_output
def _fuse_conv_bn(conv, bn):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
# flatten then reshape in case of group conv
kernel = F.flatten(conv.weight, end_axis=conv.weight.ndim - 4)
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(-1, 1, 1, 1)
module_output.weight[:] = (kernel * t).reshape(module_output.weight.shape)
module_output.bias[:] = beta + ((conv.bias if conv.bias is not None else 0) - running_mean) * gamma / std
return module_output
class ConvBn2d(nn.ConvBn2d):
def __init__(self, *args, **kwargs):
bias = kwargs.pop("bias", False) and False
super().__init__(*args, bias=bias, **kwargs)
@classmethod
def fuse_conv_bn(cls, module: nn.Module):
module_output = module
if isinstance(module, ConvBn2d):
return _fuse_conv_bn(module.conv, module.bn)
for name, child in module.named_children():
setattr(module_output, name, cls.fuse_conv_bn(child))
del module
return module_output
class LargeKernelReparam(nn.Module):
def __init__(self, channels, kernel, small_kernels=()):
super(LargeKernelReparam, self).__init__()
self.dw_large = ConvBn2d(channels, channels, kernel, padding=kernel // 2, groups=channels)
self.small_kernels = small_kernels
for k in self.small_kernels:
setattr(self, f"dw_small_{k}", ConvBn2d(channels, channels, k, padding=k // 2, groups=channels))
def forward(self, inp):
outp = self.dw_large(inp)
for k in self.small_kernels:
outp += getattr(self, f"dw_small_{k}")(inp)
return outp
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, LargeKernelReparam):
module = ConvBn2d.fuse_conv_bn(module)
module_output = copy.deepcopy(module.dw_large)
kernel = module_output.kernel_size[0]
for k in module.small_kernels:
dw_small = getattr(module, f"dw_small_{k}")
module_output.weight += F.pad(dw_small.weight, [[0, 0]] * 3 + [[(kernel - k) // 2] * 2] * 2)
module_output.bias += dw_small.bias
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class Mlp(nn.Module):
def __init__(self, in_channels, hidden_channels=None, out_channels=None, act_layer=nn.GELU, drop=0.,):
super().__init__()
out_features = out_channels or in_channels
hidden_features = hidden_channels or in_channels
self.fc1 = ConvBn2d(in_channels, hidden_features, 1, stride=1, padding=0)
self.act = act_layer()
self.fc2 = ConvBn2d(hidden_features, out_features, 1, stride=1, padding=0)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepLKBlock(nn.Module):
def __init__(self, channels, kernel, small_kernels=(), dw_ratio=1.0, mlp_ratio=4.0, drop_path=0., activation=nn.ReLU):
super().__init__()
self.pre_bn = nn.BatchNorm2d(channels)
self.pw1 = ConvBn2d(channels, int(channels * dw_ratio), 1, 1, 0)
self.pw1_act = activation()
self.dw = LargeKernelReparam(int(channels * dw_ratio), kernel, small_kernels=small_kernels)
self.dw_act = activation()
self.pw2 = ConvBn2d(int(channels * dw_ratio), channels, 1, 1, 0)
self.premlp_bn = nn.BatchNorm2d(channels)
self.mlp = Mlp(in_channels=channels, hidden_channels=int(channels * mlp_ratio))
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
y = self.pre_bn(x)
y = self.pw1_act(self.pw1(y))
y = self.dw_act(self.dw(y))
y = self.pw2(y)
x = x + self.drop_path(y)
y = self.premlp_bn(x)
y = self.mlp(y)
x = x + self.drop_path(y)
return x
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, RepLKBlock):
LargeKernelReparam.convert_to_deploy(module)
ConvBn2d.fuse_conv_bn(module)
module.pre_bn, module.pw1 = nn.Identity(), _fuse_prebn_conv1x1(module.pre_bn, module.pw1)
module.premlp_bn, module.mlp.fc1 = nn.Identity(), _fuse_prebn_conv1x1(module.premlp_bn, module.mlp.fc1)
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class DownSample(nn.Sequential):
def __init__(self, in_channels, out_channels, activation=nn.ReLU):
super().__init__(
ConvBn2d(in_channels, out_channels, 1),
activation(),
ConvBn2d(out_channels, out_channels, 3, stride=2, padding=1, groups=out_channels),
activation(),
)
class Stem(nn.Sequential):
def __init__(self, in_channels, out_channels, activation=nn.ReLU):
super().__init__(
ConvBn2d(in_channels, out_channels, 3, stride=2, padding=1),
activation(),
ConvBn2d(out_channels, out_channels, 3, padding=1, groups=out_channels),
activation(),
ConvBn2d(out_channels, out_channels, 1),
activation(),
ConvBn2d(out_channels, out_channels, 3, stride=2, padding=1, groups=out_channels),
activation(),
)
class RepLKNet(nn.Module):
def __init__(
self,
in_channels=3,
depths=(2, 2, 18, 2),
dims=(128, 256, 512, 1024),
kernel_sizes=(31, 29, 27, 13),
small_kernels=(5,),
dw_ratio=1.0,
mlp_ratio=4.0,
num_classes=1000,
drop_path_rate=0.5,
):
super().__init__()
self.stem = Stem(in_channels, dims[0])
# stochastic depth
dpr = (x for x in np.linspace(0, drop_path_rate, sum(depths))) # stochastic depth decay rule
self.blocks = []
for stage, (depth, dim, ksize) in enumerate(zip(depths, dims, kernel_sizes)):
for _ in range(depth):
self.blocks.append(
RepLKBlock(dim, ksize, small_kernels=small_kernels,
dw_ratio=dw_ratio, mlp_ratio=mlp_ratio, drop_path=next(dpr))
)
if stage < len(depths) - 1:
self.blocks.append(DownSample(dim, dims[stage + 1]))
self.norm = nn.BatchNorm2d(dims[-1])
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.head = | nn.Linear(dims[-1], num_classes) | megengine.module.Linear |
# Copyright (c) 2014-2022 Megvii Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import megengine
import megengine.functional as F
import megengine.module as nn
import numpy as np
from basecls.layers import DropPath, init_weights
from basecls.utils import registers
def _fuse_prebn_conv1x1(bn, conv):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
assert conv.groups == 1
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(1, -1, 1, 1)
module_output.weight[:] = kernel * t
module_output.bias[:] = F.conv2d(beta - running_mean * gamma / std, kernel, conv.bias)
return module_output
def _fuse_conv_bn(conv, bn):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
# flatten then reshape in case of group conv
kernel = F.flatten(conv.weight, end_axis=conv.weight.ndim - 4)
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(-1, 1, 1, 1)
module_output.weight[:] = (kernel * t).reshape(module_output.weight.shape)
module_output.bias[:] = beta + ((conv.bias if conv.bias is not None else 0) - running_mean) * gamma / std
return module_output
class ConvBn2d(nn.ConvBn2d):
def __init__(self, *args, **kwargs):
bias = kwargs.pop("bias", False) and False
super().__init__(*args, bias=bias, **kwargs)
@classmethod
def fuse_conv_bn(cls, module: nn.Module):
module_output = module
if isinstance(module, ConvBn2d):
return _fuse_conv_bn(module.conv, module.bn)
for name, child in module.named_children():
setattr(module_output, name, cls.fuse_conv_bn(child))
del module
return module_output
class LargeKernelReparam(nn.Module):
def __init__(self, channels, kernel, small_kernels=()):
super(LargeKernelReparam, self).__init__()
self.dw_large = ConvBn2d(channels, channels, kernel, padding=kernel // 2, groups=channels)
self.small_kernels = small_kernels
for k in self.small_kernels:
setattr(self, f"dw_small_{k}", ConvBn2d(channels, channels, k, padding=k // 2, groups=channels))
def forward(self, inp):
outp = self.dw_large(inp)
for k in self.small_kernels:
outp += getattr(self, f"dw_small_{k}")(inp)
return outp
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, LargeKernelReparam):
module = ConvBn2d.fuse_conv_bn(module)
module_output = copy.deepcopy(module.dw_large)
kernel = module_output.kernel_size[0]
for k in module.small_kernels:
dw_small = getattr(module, f"dw_small_{k}")
module_output.weight += F.pad(dw_small.weight, [[0, 0]] * 3 + [[(kernel - k) // 2] * 2] * 2)
module_output.bias += dw_small.bias
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class Mlp(nn.Module):
def __init__(self, in_channels, hidden_channels=None, out_channels=None, act_layer=nn.GELU, drop=0.,):
super().__init__()
out_features = out_channels or in_channels
hidden_features = hidden_channels or in_channels
self.fc1 = ConvBn2d(in_channels, hidden_features, 1, stride=1, padding=0)
self.act = act_layer()
self.fc2 = ConvBn2d(hidden_features, out_features, 1, stride=1, padding=0)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepLKBlock(nn.Module):
def __init__(self, channels, kernel, small_kernels=(), dw_ratio=1.0, mlp_ratio=4.0, drop_path=0., activation=nn.ReLU):
super().__init__()
self.pre_bn = nn.BatchNorm2d(channels)
self.pw1 = ConvBn2d(channels, int(channels * dw_ratio), 1, 1, 0)
self.pw1_act = activation()
self.dw = LargeKernelReparam(int(channels * dw_ratio), kernel, small_kernels=small_kernels)
self.dw_act = activation()
self.pw2 = ConvBn2d(int(channels * dw_ratio), channels, 1, 1, 0)
self.premlp_bn = nn.BatchNorm2d(channels)
self.mlp = Mlp(in_channels=channels, hidden_channels=int(channels * mlp_ratio))
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
y = self.pre_bn(x)
y = self.pw1_act(self.pw1(y))
y = self.dw_act(self.dw(y))
y = self.pw2(y)
x = x + self.drop_path(y)
y = self.premlp_bn(x)
y = self.mlp(y)
x = x + self.drop_path(y)
return x
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, RepLKBlock):
LargeKernelReparam.convert_to_deploy(module)
ConvBn2d.fuse_conv_bn(module)
module.pre_bn, module.pw1 = nn.Identity(), _fuse_prebn_conv1x1(module.pre_bn, module.pw1)
module.premlp_bn, module.mlp.fc1 = nn.Identity(), _fuse_prebn_conv1x1(module.premlp_bn, module.mlp.fc1)
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class DownSample(nn.Sequential):
def __init__(self, in_channels, out_channels, activation=nn.ReLU):
super().__init__(
ConvBn2d(in_channels, out_channels, 1),
activation(),
ConvBn2d(out_channels, out_channels, 3, stride=2, padding=1, groups=out_channels),
activation(),
)
class Stem(nn.Sequential):
def __init__(self, in_channels, out_channels, activation=nn.ReLU):
super().__init__(
ConvBn2d(in_channels, out_channels, 3, stride=2, padding=1),
activation(),
ConvBn2d(out_channels, out_channels, 3, padding=1, groups=out_channels),
activation(),
ConvBn2d(out_channels, out_channels, 1),
activation(),
ConvBn2d(out_channels, out_channels, 3, stride=2, padding=1, groups=out_channels),
activation(),
)
class RepLKNet(nn.Module):
def __init__(
self,
in_channels=3,
depths=(2, 2, 18, 2),
dims=(128, 256, 512, 1024),
kernel_sizes=(31, 29, 27, 13),
small_kernels=(5,),
dw_ratio=1.0,
mlp_ratio=4.0,
num_classes=1000,
drop_path_rate=0.5,
):
super().__init__()
self.stem = Stem(in_channels, dims[0])
# stochastic depth
dpr = (x for x in np.linspace(0, drop_path_rate, sum(depths))) # stochastic depth decay rule
self.blocks = []
for stage, (depth, dim, ksize) in enumerate(zip(depths, dims, kernel_sizes)):
for _ in range(depth):
self.blocks.append(
RepLKBlock(dim, ksize, small_kernels=small_kernels,
dw_ratio=dw_ratio, mlp_ratio=mlp_ratio, drop_path=next(dpr))
)
if stage < len(depths) - 1:
self.blocks.append(DownSample(dim, dims[stage + 1]))
self.norm = nn.BatchNorm2d(dims[-1])
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.head = nn.Linear(dims[-1], num_classes) if num_classes > 0 else | nn.Identity() | megengine.module.Identity |
# Copyright (c) 2014-2022 Megvii Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import megengine
import megengine.functional as F
import megengine.module as nn
import numpy as np
from basecls.layers import DropPath, init_weights
from basecls.utils import registers
def _fuse_prebn_conv1x1(bn, conv):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
assert conv.groups == 1
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(1, -1, 1, 1)
module_output.weight[:] = kernel * t
module_output.bias[:] = F.conv2d(beta - running_mean * gamma / std, kernel, conv.bias)
return module_output
def _fuse_conv_bn(conv, bn):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
# flatten then reshape in case of group conv
kernel = F.flatten(conv.weight, end_axis=conv.weight.ndim - 4)
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(-1, 1, 1, 1)
module_output.weight[:] = (kernel * t).reshape(module_output.weight.shape)
module_output.bias[:] = beta + ((conv.bias if conv.bias is not None else 0) - running_mean) * gamma / std
return module_output
class ConvBn2d(nn.ConvBn2d):
def __init__(self, *args, **kwargs):
bias = kwargs.pop("bias", False) and False
super().__init__(*args, bias=bias, **kwargs)
@classmethod
def fuse_conv_bn(cls, module: nn.Module):
module_output = module
if isinstance(module, ConvBn2d):
return _fuse_conv_bn(module.conv, module.bn)
for name, child in module.named_children():
setattr(module_output, name, cls.fuse_conv_bn(child))
del module
return module_output
class LargeKernelReparam(nn.Module):
def __init__(self, channels, kernel, small_kernels=()):
super(LargeKernelReparam, self).__init__()
self.dw_large = ConvBn2d(channels, channels, kernel, padding=kernel // 2, groups=channels)
self.small_kernels = small_kernels
for k in self.small_kernels:
setattr(self, f"dw_small_{k}", ConvBn2d(channels, channels, k, padding=k // 2, groups=channels))
def forward(self, inp):
outp = self.dw_large(inp)
for k in self.small_kernels:
outp += getattr(self, f"dw_small_{k}")(inp)
return outp
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, LargeKernelReparam):
module = ConvBn2d.fuse_conv_bn(module)
module_output = copy.deepcopy(module.dw_large)
kernel = module_output.kernel_size[0]
for k in module.small_kernels:
dw_small = getattr(module, f"dw_small_{k}")
module_output.weight += | F.pad(dw_small.weight, [[0, 0]] * 3 + [[(kernel - k) // 2] * 2] * 2) | megengine.functional.pad |
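# Illustrative sketch, not one of the dataset rows: by linearity of convolution,
# running a 5x5 depthwise conv and a 3x3 depthwise conv (both with "same" padding)
# and adding the outputs equals a single 5x5 depthwise conv whose kernel is the
# large kernel plus the small kernel zero-padded to 5x5 -- the merge done by
# LargeKernelReparam.convert_to_deploy above. Sizes are made up for the demo; it
# assumes megengine and numpy are installed.
import numpy as np
import megengine as mge
import megengine.functional as F

rng = np.random.default_rng(1)
C = 4
x = mge.tensor(rng.standard_normal((1, C, 16, 16)).astype("float32"))
# depthwise (group) conv weights are 5D in MegEngine: (groups, 1, 1, kh, kw)
w_large = mge.tensor(rng.standard_normal((C, 1, 1, 5, 5)).astype("float32"))
w_small = mge.tensor(rng.standard_normal((C, 1, 1, 3, 3)).astype("float32"))

# two parallel branches, as in training mode
y_two = F.conv2d(x, w_large, padding=2, groups=C) + F.conv2d(x, w_small, padding=1, groups=C)

# single merged kernel, as in deploy mode
pad = (5 - 3) // 2
w_merged = w_large + F.pad(w_small, [[0, 0]] * 3 + [[pad, pad]] * 2)
y_one = F.conv2d(x, w_merged, padding=2, groups=C)
print(np.allclose(y_two.numpy(), y_one.numpy(), atol=1e-4))  # expected: True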
# Copyright (c) 2014-2022 Megvii Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import megengine
import megengine.functional as F
import megengine.module as nn
import numpy as np
from basecls.layers import DropPath, init_weights
from basecls.utils import registers
def _fuse_prebn_conv1x1(bn, conv):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
assert conv.groups == 1
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(1, -1, 1, 1)
module_output.weight[:] = kernel * t
module_output.bias[:] = F.conv2d(beta - running_mean * gamma / std, kernel, conv.bias)
return module_output
def _fuse_conv_bn(conv, bn):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
# flatten then reshape in case of group conv
kernel = F.flatten(conv.weight, end_axis=conv.weight.ndim - 4)
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(-1, 1, 1, 1)
module_output.weight[:] = (kernel * t).reshape(module_output.weight.shape)
module_output.bias[:] = beta + ((conv.bias if conv.bias is not None else 0) - running_mean) * gamma / std
return module_output
class ConvBn2d(nn.ConvBn2d):
def __init__(self, *args, **kwargs):
bias = kwargs.pop("bias", False) and False
super().__init__(*args, bias=bias, **kwargs)
@classmethod
def fuse_conv_bn(cls, module: nn.Module):
module_output = module
if isinstance(module, ConvBn2d):
return _fuse_conv_bn(module.conv, module.bn)
for name, child in module.named_children():
setattr(module_output, name, cls.fuse_conv_bn(child))
del module
return module_output
class LargeKernelReparam(nn.Module):
def __init__(self, channels, kernel, small_kernels=()):
super(LargeKernelReparam, self).__init__()
self.dw_large = ConvBn2d(channels, channels, kernel, padding=kernel // 2, groups=channels)
self.small_kernels = small_kernels
for k in self.small_kernels:
setattr(self, f"dw_small_{k}", ConvBn2d(channels, channels, k, padding=k // 2, groups=channels))
def forward(self, inp):
outp = self.dw_large(inp)
for k in self.small_kernels:
outp += getattr(self, f"dw_small_{k}")(inp)
return outp
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, LargeKernelReparam):
module = ConvBn2d.fuse_conv_bn(module)
module_output = copy.deepcopy(module.dw_large)
kernel = module_output.kernel_size[0]
for k in module.small_kernels:
dw_small = getattr(module, f"dw_small_{k}")
module_output.weight += F.pad(dw_small.weight, [[0, 0]] * 3 + [[(kernel - k) // 2] * 2] * 2)
module_output.bias += dw_small.bias
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class Mlp(nn.Module):
def __init__(self, in_channels, hidden_channels=None, out_channels=None, act_layer=nn.GELU, drop=0.,):
super().__init__()
out_features = out_channels or in_channels
hidden_features = hidden_channels or in_channels
self.fc1 = ConvBn2d(in_channels, hidden_features, 1, stride=1, padding=0)
self.act = act_layer()
self.fc2 = ConvBn2d(hidden_features, out_features, 1, stride=1, padding=0)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepLKBlock(nn.Module):
def __init__(self, channels, kernel, small_kernels=(), dw_ratio=1.0, mlp_ratio=4.0, drop_path=0., activation=nn.ReLU):
super().__init__()
self.pre_bn = nn.BatchNorm2d(channels)
self.pw1 = ConvBn2d(channels, int(channels * dw_ratio), 1, 1, 0)
self.pw1_act = activation()
self.dw = LargeKernelReparam(int(channels * dw_ratio), kernel, small_kernels=small_kernels)
self.dw_act = activation()
self.pw2 = ConvBn2d(int(channels * dw_ratio), channels, 1, 1, 0)
self.premlp_bn = nn.BatchNorm2d(channels)
self.mlp = Mlp(in_channels=channels, hidden_channels=int(channels * mlp_ratio))
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
y = self.pre_bn(x)
y = self.pw1_act(self.pw1(y))
y = self.dw_act(self.dw(y))
y = self.pw2(y)
x = x + self.drop_path(y)
y = self.premlp_bn(x)
y = self.mlp(y)
x = x + self.drop_path(y)
return x
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, RepLKBlock):
LargeKernelReparam.convert_to_deploy(module)
ConvBn2d.fuse_conv_bn(module)
module.pre_bn, module.pw1 = | nn.Identity() | megengine.module.Identity |
# Copyright (c) 2014-2022 Megvii Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import megengine
import megengine.functional as F
import megengine.module as nn
import numpy as np
from basecls.layers import DropPath, init_weights
from basecls.utils import registers
def _fuse_prebn_conv1x1(bn, conv):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
assert conv.groups == 1
kernel = conv.weight
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(1, -1, 1, 1)
module_output.weight[:] = kernel * t
module_output.bias[:] = F.conv2d(beta - running_mean * gamma / std, kernel, conv.bias)
return module_output
def _fuse_conv_bn(conv, bn):
module_output = copy.deepcopy(conv)
module_output.bias = megengine.Parameter(np.zeros(module_output._infer_bias_shape(), dtype=np.float32))
# flatten then reshape in case of group conv
kernel = F.flatten(conv.weight, end_axis=conv.weight.ndim - 4)
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = F.sqrt(running_var + eps)
t = (gamma / std).reshape(-1, 1, 1, 1)
module_output.weight[:] = (kernel * t).reshape(module_output.weight.shape)
module_output.bias[:] = beta + ((conv.bias if conv.bias is not None else 0) - running_mean) * gamma / std
return module_output
class ConvBn2d(nn.ConvBn2d):
def __init__(self, *args, **kwargs):
bias = kwargs.pop("bias", False) and False
super().__init__(*args, bias=bias, **kwargs)
@classmethod
def fuse_conv_bn(cls, module: nn.Module):
module_output = module
if isinstance(module, ConvBn2d):
return _fuse_conv_bn(module.conv, module.bn)
for name, child in module.named_children():
setattr(module_output, name, cls.fuse_conv_bn(child))
del module
return module_output
class LargeKernelReparam(nn.Module):
def __init__(self, channels, kernel, small_kernels=()):
super(LargeKernelReparam, self).__init__()
self.dw_large = ConvBn2d(channels, channels, kernel, padding=kernel // 2, groups=channels)
self.small_kernels = small_kernels
for k in self.small_kernels:
setattr(self, f"dw_small_{k}", ConvBn2d(channels, channels, k, padding=k // 2, groups=channels))
def forward(self, inp):
outp = self.dw_large(inp)
for k in self.small_kernels:
outp += getattr(self, f"dw_small_{k}")(inp)
return outp
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, LargeKernelReparam):
module = ConvBn2d.fuse_conv_bn(module)
module_output = copy.deepcopy(module.dw_large)
kernel = module_output.kernel_size[0]
for k in module.small_kernels:
dw_small = getattr(module, f"dw_small_{k}")
module_output.weight += F.pad(dw_small.weight, [[0, 0]] * 3 + [[(kernel - k) // 2] * 2] * 2)
module_output.bias += dw_small.bias
return module_output
for name, child in module.named_children():
setattr(module_output, name, cls.convert_to_deploy(child))
del module
return module_output
class Mlp(nn.Module):
def __init__(self, in_channels, hidden_channels=None, out_channels=None, act_layer=nn.GELU, drop=0.,):
super().__init__()
out_features = out_channels or in_channels
hidden_features = hidden_channels or in_channels
self.fc1 = ConvBn2d(in_channels, hidden_features, 1, stride=1, padding=0)
self.act = act_layer()
self.fc2 = ConvBn2d(hidden_features, out_features, 1, stride=1, padding=0)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class RepLKBlock(nn.Module):
def __init__(self, channels, kernel, small_kernels=(), dw_ratio=1.0, mlp_ratio=4.0, drop_path=0., activation=nn.ReLU):
super().__init__()
self.pre_bn = nn.BatchNorm2d(channels)
self.pw1 = ConvBn2d(channels, int(channels * dw_ratio), 1, 1, 0)
self.pw1_act = activation()
self.dw = LargeKernelReparam(int(channels * dw_ratio), kernel, small_kernels=small_kernels)
self.dw_act = activation()
self.pw2 = ConvBn2d(int(channels * dw_ratio), channels, 1, 1, 0)
self.premlp_bn = nn.BatchNorm2d(channels)
self.mlp = Mlp(in_channels=channels, hidden_channels=int(channels * mlp_ratio))
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
y = self.pre_bn(x)
y = self.pw1_act(self.pw1(y))
y = self.dw_act(self.dw(y))
y = self.pw2(y)
x = x + self.drop_path(y)
y = self.premlp_bn(x)
y = self.mlp(y)
x = x + self.drop_path(y)
return x
@classmethod
def convert_to_deploy(cls, module: nn.Module):
module_output = module
if isinstance(module, RepLKBlock):
LargeKernelReparam.convert_to_deploy(module)
ConvBn2d.fuse_conv_bn(module)
module.pre_bn, module.pw1 = nn.Identity(), _fuse_prebn_conv1x1(module.pre_bn, module.pw1)
module.premlp_bn, module.mlp.fc1 = | nn.Identity() | megengine.module.Identity |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = | nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias) | megengine.module.Conv2d |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = | nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias) | megengine.module.Conv2d |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = | nn.Conv2d(in_size, out_size, kernel_size=1, bias=True) | megengine.module.Conv2d |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = | nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True) | megengine.module.ConvTranspose2d |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
self.num_subspace = subspace_dim
print(self.num_subspace, subnet_repeat_num)
self.subnet = Subspace(in_size, self.num_subspace)
self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def forward(self, x, bridge):
up = self.up(x)
bridge = self.skip_m(bridge)
out = | F.concat([up, bridge], 1) | megengine.functional.concat |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
self.num_subspace = subspace_dim
print(self.num_subspace, subnet_repeat_num)
self.subnet = Subspace(in_size, self.num_subspace)
self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def forward(self, x, bridge):
up = self.up(x)
bridge = self.skip_m(bridge)
out = F.concat([up, bridge], 1)
if self.subnet:
b_, c_, h_, w_ = bridge.shape
sub = self.subnet(out)
V_t = sub.reshape(b_, self.num_subspace, h_*w_)
V_t = V_t / (1e-6 + F.abs(V_t).sum(axis=2, keepdims=True))
V = V_t.transpose(0, 2, 1)
mat = F.matmul(V_t, V)
mat_inv = F.matinv(mat)
project_mat = F.matmul(mat_inv, V_t)
bridge_ = bridge.reshape(b_, c_, h_*w_)
project_feature = F.matmul(project_mat, bridge_.transpose(0, 2, 1))
bridge = F.matmul(V, project_feature).transpose(0, 2, 1).reshape(b_, c_, h_, w_)
out = F.concat([up, bridge], 1)
out = self.conv_block(out)
return out
class Subspace(nn.Module):
def __init__(self, in_size, out_size):
super(Subspace, self).__init__()
self.blocks = []
self.blocks.append(UNetConvBlock(in_size, out_size, False, 0.2))
self.shortcut = | nn.Conv2d(in_size, out_size, kernel_size=1, bias=True) | megengine.module.Conv2d |
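# Illustrative sketch, not one of the dataset rows: UNetUpBlock.forward above
# projects the flattened skip ("bridge") features onto the K basis maps predicted
# by Subspace via the normal equations, proj = V (V^T V)^(-1) V^T b per channel.
# A tiny NumPy version of that projection; the shapes are made up for the demo.
import numpy as np

rng = np.random.default_rng(2)
K, HW, C = 4, 64, 8                    # subspace size, spatial positions, channels
V = rng.standard_normal((HW, K))       # basis maps, one column per subspace vector
bridge = rng.standard_normal((HW, C))  # flattened bridge features

proj_mat = np.linalg.inv(V.T @ V) @ V.T   # (K, HW), matches the forward pass above
coeffs = proj_mat @ bridge                # coordinates of bridge in the subspace
bridge_proj = V @ coeffs                  # back to (HW, C), now lying in span(V)

# the residual is orthogonal to every basis vector (up to numerical error)
print(np.max(np.abs(V.T @ (bridge - bridge_proj))) < 1e-6)  # expected: True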
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
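        # Residual block: two 3x3 conv + LeakyReLU layers plus a 1x1 convolution shortcut.
        # When downsampling, both the strided output and the full-resolution feature
        # (kept as the skip connection for the decoder) are returned.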
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
self.num_subspace = subspace_dim
print(self.num_subspace, subnet_repeat_num)
self.subnet = Subspace(in_size, self.num_subspace)
self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def forward(self, x, bridge):
up = self.up(x)
bridge = self.skip_m(bridge)
out = F.concat([up, bridge], 1)
if self.subnet:
b_, c_, h_, w_ = bridge.shape
sub = self.subnet(out)
V_t = sub.reshape(b_, self.num_subspace, h_*w_)
V_t = V_t / (1e-6 + F.abs(V_t).sum(axis=2, keepdims=True))
V = V_t.transpose(0, 2, 1)
mat = F.matmul(V_t, V)
mat_inv = F.matinv(mat)
project_mat = F.matmul(mat_inv, V_t)
bridge_ = bridge.reshape(b_, c_, h_*w_)
project_feature = F.matmul(project_mat, bridge_.transpose(0, 2, 1))
bridge = F.matmul(V, project_feature).transpose(0, 2, 1).reshape(b_, c_, h_, w_)
out = F.concat([up, bridge], 1)
out = self.conv_block(out)
return out
class Subspace(nn.Module):
def __init__(self, in_size, out_size):
super(Subspace, self).__init__()
self.blocks = []
self.blocks.append(UNetConvBlock(in_size, out_size, False, 0.2))
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
sc = self.shortcut(x)
for i in range(len(self.blocks)):
x = self.blocks[i](x)
return x + sc
class skip_blocks(nn.Module):
def __init__(self, in_size, out_size, repeat_num=1):
super(skip_blocks, self).__init__()
self.blocks = []
self.re_num = repeat_num
mid_c = 128
self.blocks.append(UNetConvBlock(in_size, mid_c, False, 0.2))
for i in range(self.re_num - 2):
self.blocks.append(UNetConvBlock(mid_c, mid_c, False, 0.2))
self.blocks.append(UNetConvBlock(mid_c, out_size, False, 0.2))
self.shortcut = | nn.Conv2d(in_size, out_size, kernel_size=1, bias=True) | megengine.module.Conv2d |
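
For reference, a minimal usage sketch of the UNetD denoiser defined above; the batch size, the 3-channel input and the 64x64 spatial size are illustrative assumptions, and the classes are assumed to be in scope from the definitions above:

import numpy as np
import megengine as mge

# assumes UNetD (and the blocks it uses) from the code above is defined in the same file
net = UNetD(in_chn=3, wf=32, depth=5)
net.eval()
x = mge.tensor(np.random.rand(1, 3, 64, 64).astype("float32"))
pred = net(x)   # denoised output, same shape as the input: (1, 3, 64, 64)
print(pred.shape)
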
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
| nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True) | megengine.module.Conv2d |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
| nn.LeakyReLU(relu_slope) | megengine.module.LeakyReLU |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
| nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True) | megengine.module.Conv2d |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
| nn.LeakyReLU(relu_slope) | megengine.module.LeakyReLU |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
self.num_subspace = subspace_dim
print(self.num_subspace, subnet_repeat_num)
self.subnet = Subspace(in_size, self.num_subspace)
self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def forward(self, x, bridge):
up = self.up(x)
bridge = self.skip_m(bridge)
out = F.concat([up, bridge], 1)
if self.subnet:
b_, c_, h_, w_ = bridge.shape
sub = self.subnet(out)
V_t = sub.reshape(b_, self.num_subspace, h_*w_)
V_t = V_t / (1e-6 + F.abs(V_t).sum(axis=2, keepdims=True))
V = V_t.transpose(0, 2, 1)
mat = | F.matmul(V_t, V) | megengine.functional.matmul |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
self.num_subspace = subspace_dim
print(self.num_subspace, subnet_repeat_num)
self.subnet = Subspace(in_size, self.num_subspace)
self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def forward(self, x, bridge):
up = self.up(x)
bridge = self.skip_m(bridge)
out = F.concat([up, bridge], 1)
if self.subnet:
b_, c_, h_, w_ = bridge.shape
sub = self.subnet(out)
V_t = sub.reshape(b_, self.num_subspace, h_*w_)
V_t = V_t / (1e-6 + F.abs(V_t).sum(axis=2, keepdims=True))
V = V_t.transpose(0, 2, 1)
mat = F.matmul(V_t, V)
mat_inv = | F.matinv(mat) | megengine.functional.matinv |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
self.num_subspace = subspace_dim
print(self.num_subspace, subnet_repeat_num)
self.subnet = Subspace(in_size, self.num_subspace)
self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def forward(self, x, bridge):
up = self.up(x)
bridge = self.skip_m(bridge)
out = F.concat([up, bridge], 1)
if self.subnet:
b_, c_, h_, w_ = bridge.shape
sub = self.subnet(out)
V_t = sub.reshape(b_, self.num_subspace, h_*w_)
V_t = V_t / (1e-6 + F.abs(V_t).sum(axis=2, keepdims=True))
V = V_t.transpose(0, 2, 1)
mat = F.matmul(V_t, V)
mat_inv = F.matinv(mat)
project_mat = | F.matmul(mat_inv, V_t) | megengine.functional.matmul |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
self.num_subspace = subspace_dim
print(self.num_subspace, subnet_repeat_num)
self.subnet = Subspace(in_size, self.num_subspace)
self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def forward(self, x, bridge):
up = self.up(x)
bridge = self.skip_m(bridge)
out = F.concat([up, bridge], 1)
if self.subnet:
b_, c_, h_, w_ = bridge.shape
sub = self.subnet(out)
V_t = sub.reshape(b_, self.num_subspace, h_*w_)
V_t = V_t / (1e-6 + F.abs(V_t).sum(axis=2, keepdims=True))
V = V_t.transpose(0, 2, 1)
mat = F.matmul(V_t, V)
mat_inv = F.matinv(mat)
project_mat = F.matmul(mat_inv, V_t)
bridge_ = bridge.reshape(b_, c_, h_*w_)
project_feature = F.matmul(project_mat, bridge_.transpose(0, 2, 1))
bridge = F.matmul(V, project_feature).transpose(0, 2, 1).reshape(b_, c_, h_, w_)
out = | F.concat([up, bridge], 1) | megengine.functional.concat |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
| nn.init.xavier_uniform_(m.weight) | megengine.module.init.xavier_uniform_ |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
| nn.init.zeros_(m.bias) | megengine.module.init.zeros_ |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
self.num_subspace = subspace_dim
print(self.num_subspace, subnet_repeat_num)
self.subnet = Subspace(in_size, self.num_subspace)
self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def forward(self, x, bridge):
up = self.up(x)
bridge = self.skip_m(bridge)
out = F.concat([up, bridge], 1)
if self.subnet:
b_, c_, h_, w_ = bridge.shape
sub = self.subnet(out)
V_t = sub.reshape(b_, self.num_subspace, h_*w_)
V_t = V_t / (1e-6 + | F.abs(V_t) | megengine.functional.abs |
#!/usr/bin/env python3
import megengine as mge
import megengine.module as nn
import megengine.functional as F
def conv3x3(in_chn, out_chn, bias=True):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
return layer
def conv_down(in_chn, out_chn, bias=False):
layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
return layer
class UNetD(nn.Module):
def __init__(self, in_chn, wf=32, depth=5, relu_slope=0.2, subspace_dim=16):
super(UNetD, self).__init__()
self.depth = depth
self.down_path = []
prev_channels = self.get_input_chn(in_chn)
for i in range(depth):
downsample = True if (i+1) < depth else False
self.down_path.append(UNetConvBlock(prev_channels, (2**i)*wf, downsample, relu_slope))
prev_channels = (2**i) * wf
# self.ema = EMAU(prev_channels, prev_channels//8)
self.up_path = []
subnet_repeat_num = 1
for i in reversed(range(depth - 1)):
self.up_path.append(UNetUpBlock(prev_channels, (2**i)*wf, relu_slope, subnet_repeat_num, subspace_dim))
prev_channels = (2**i)*wf
subnet_repeat_num += 1
self.last = conv3x3(prev_channels, in_chn, bias=True)
#self._initialize()
def forward(self, x1):
blocks = []
for i, down in enumerate(self.down_path):
# print(x1.shape)
if (i+1) < self.depth:
x1, x1_up = down(x1)
blocks.append(x1_up)
else:
x1 = down(x1)
# print(x1.shape)
# x1 = self.ema(x1)
for i, up in enumerate(self.up_path):
# print(x1.shape, blocks[-i-1].shape)
x1 = up(x1, blocks[-i-1])
pred = self.last(x1)
return pred
def get_input_chn(self, in_chn):
return in_chn
def _initialize(self):
gain = nn.init.calculate_gain('leaky_relu', 0.20)
for m in self.modules():
if isinstance(m, nn.Conv2d):
print("weight")
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
print("bias")
nn.init.zeros_(m.bias)
class UNetConvBlock(nn.Module):
def __init__(self, in_size, out_size, downsample, relu_slope):
super(UNetConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope),
nn.Conv2d(out_size, out_size, kernel_size=3, padding=1, bias=True),
nn.LeakyReLU(relu_slope))
self.downsample = downsample
if downsample:
self.downsample = conv_down(out_size, out_size, bias=False)
self.shortcut = nn.Conv2d(in_size, out_size, kernel_size=1, bias=True)
def forward(self, x):
out = self.block(x)
sc = self.shortcut(x)
out = out + sc
if self.downsample:
out_down = self.downsample(out)
return out_down, out
else:
return out
class UNetUpBlock(nn.Module):
def __init__(self, in_size, out_size, relu_slope, subnet_repeat_num, subspace_dim=16):
super(UNetUpBlock, self).__init__()
self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2, bias=True)
self.conv_block = UNetConvBlock(in_size, out_size, False, relu_slope)
self.num_subspace = subspace_dim
print(self.num_subspace, subnet_repeat_num)
self.subnet = Subspace(in_size, self.num_subspace)
self.skip_m = skip_blocks(out_size, out_size, subnet_repeat_num)
def forward(self, x, bridge):
up = self.up(x)
bridge = self.skip_m(bridge)
out = F.concat([up, bridge], 1)
if self.subnet:
b_, c_, h_, w_ = bridge.shape
sub = self.subnet(out)
V_t = sub.reshape(b_, self.num_subspace, h_*w_)
V_t = V_t / (1e-6 + F.abs(V_t).sum(axis=2, keepdims=True))
V = V_t.transpose(0, 2, 1)
mat = F.matmul(V_t, V)
mat_inv = F.matinv(mat)
project_mat = F.matmul(mat_inv, V_t)
bridge_ = bridge.reshape(b_, c_, h_*w_)
project_feature = F.matmul(project_mat, bridge_.transpose(0, 2, 1))
bridge = | F.matmul(V, project_feature) | megengine.functional.matmul |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import pickle
import boto3
import megengine as mge
from collections import defaultdict
from termcolor import colored
from common import utils
class Manager():
def __init__(self, model, optimizer, params, dataloaders, writer, logger, scheduler):
# params status
self.model = model
self.writer = writer
self.logger = logger
self.params = params
self.optimizer = optimizer
self.scheduler = scheduler
self.dataloaders = dataloaders
# metric_rule should be either Descende or Ascende
self.metric_rule = params.metric_rule
self.epoch = 0
self.step = 0
        # lower is better
if self.metric_rule == "Descende":
self.best_val_score = 100
self.best_test_score = 100
        # higher is better
elif self.metric_rule == "Ascende":
self.best_val_score = 0
self.best_test_score = 0
self.cur_val_score = 0
self.cur_test_score = 0
# train status
self.train_status = defaultdict(utils.AverageMeter)
# val status
self.val_status = defaultdict(utils.AverageMeter)
# test status
self.test_status = defaultdict(utils.AverageMeter)
# model status
self.loss_status = defaultdict(utils.AverageMeter)
# client init
self.s3_client = boto3.client('s3', endpoint_url='http://oss.i.brainpp.cn')
self.bucket_name = params.bucket_name
def update_step(self):
self.step += 1
def update_epoch(self):
self.epoch += 1
def update_loss_status(self, loss, split, bs=None):
if split == "train":
for k, v in loss.items():
bs = self.params.train_batch_size
self.loss_status[k].update(val=v.item(), num=bs)
elif split == "val":
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=bs)
elif split == "test":
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=bs)
else:
raise ValueError("Wrong eval type: {}".format(split))
def update_metric_status(self, metrics, split, bs):
if split == "val":
for k, v in metrics.items():
self.val_status[k].update(val=v.item(), num=bs)
self.cur_val_score = self.val_status[self.params.major_metric].avg
elif split == "test":
for k, v in metrics.items():
self.test_status[k].update(val=v.item(), num=bs)
self.cur_test_score = self.test_status[self.params.major_metric].avg
else:
raise ValueError("Wrong eval type: {}".format(split))
def reset_loss_status(self):
for k, v in self.loss_status.items():
self.loss_status[k].reset()
def reset_metric_status(self, split):
if split == "val":
for k, v in self.val_status.items():
self.val_status[k].reset()
elif split == "test":
for k, v in self.test_status.items():
self.test_status[k].reset()
else:
raise ValueError("Wrong eval type: {}".format(split))
def print_train_info(self):
exp_name = self.params.model_dir.split('/')[-1]
print_str = "{} Epoch: {:4d}, lr={:.6f} ".format(exp_name, self.epoch, self.scheduler.get_lr()[0])
print_str += "total loss: %.4f(%.4f)" % (self.loss_status['total'].val, self.loss_status['total'].avg)
return print_str
def print_metrics(self, split, title="Eval", color="red"):
if split == "val":
metric_status = self.val_status
elif split == "test":
metric_status = self.test_status
else:
raise ValueError("Wrong eval type: {}".format(split))
if split == "val":
print_str = " | ".join("{}: {:.4f} - avg: {:.4f}".format(k, v.val, v.avg) for k, v in metric_status.items())
print_str += " | lastest: {:.4f} | pre_best: {:.4f}".format(self.cur_val_score, self.best_val_score)
elif split == "test":
print_str = " | ".join("{}: {:.4f} - avg: {:.4f}".format(k, v.val, v.avg) for k, v in metric_status.items())
self.logger.info(colored("{} Results: {}".format(title, print_str), color, attrs=["bold"]))
def check_best_save_last_checkpoints(self, latest_freq=5):
state = {
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"step": self.step,
"epoch": self.epoch,
}
if "val" in self.dataloaders:
state["best_val_score"] = self.best_val_score
if "test" in self.dataloaders:
state["best_test_score"] = self.best_test_score
# save latest checkpoint
if self.epoch % latest_freq == 0:
latest_ckpt_name = os.path.join(self.params.model_dir, "model_latest.pth")
if self.params.save_mode == "local":
# print(state)
mge.save(state, latest_ckpt_name)
elif self.params.save_mode == "oss":
save_dict = pickle.dumps(state)
resp = self.s3_client.put_object(Bucket=self.bucket_name, Key=latest_ckpt_name, Body=save_dict[0:])
else:
raise NotImplementedError
self.logger.info("Saved latest checkpoint to: {}".format(latest_ckpt_name))
        # save the latest val metrics and check whether this is the best val checkpoint
if "val" in self.dataloaders:
val_latest_metrics_name = os.path.join(self.params.model_dir, "val_metrics_latest.json")
utils.save_dict_to_json(self.val_status, val_latest_metrics_name)
            # lower is better
if self.metric_rule == "Descende":
is_best = self.cur_val_score < self.best_val_score
            # higher is better
elif self.metric_rule == "Ascende":
is_best = self.cur_val_score > self.best_val_score
else:
raise Exception("metric_rule should be either Descende or Ascende")
if is_best:
# save metrics
self.best_val_score = self.cur_val_score
best_metrics_name = os.path.join(self.params.model_dir, "val_metrics_best.json")
utils.save_dict_to_json(self.val_status, best_metrics_name)
self.logger.info("Current is val best, score={:.4f}".format(self.best_val_score))
# save checkpoint
best_ckpt_name = os.path.join(self.params.model_dir, "val_model_best.pth")
if self.params.save_mode == "local":
mge.save(state, best_ckpt_name)
elif self.params.save_mode == "oss":
save_dict = pickle.dumps(state)
resp = self.s3_client.put_object(Bucket=self.bucket_name, Key=best_ckpt_name, Body=save_dict[0:])
else:
raise NotImplementedError
self.logger.info("Saved val best checkpoint to: {}".format(best_ckpt_name))
        # save the latest test metrics and check whether this is the best test checkpoint
# if self.dataloaders["test"] is not None:
if "test" in self.dataloaders:
test_latest_metrics_name = os.path.join(self.params.model_dir, "test_metrics_latest.json")
utils.save_dict_to_json(self.test_status, test_latest_metrics_name)
            # lower is better
if self.metric_rule == "Descende":
is_best = self.cur_test_score < self.best_test_score
            # higher is better
elif self.metric_rule == "Ascende":
is_best = self.cur_test_score > self.best_test_score
else:
raise Exception("metric_rule should be either Descende or Ascende")
if is_best:
# save metrics
self.best_test_score = self.cur_test_score
best_metrics_name = os.path.join(self.params.model_dir, "test_metrics_best.json")
utils.save_dict_to_json(self.test_status, best_metrics_name)
self.logger.info("Current is test best, score={:.4f}".format(self.best_test_score))
# save checkpoint
best_ckpt_name = os.path.join(self.params.model_dir, "test_model_best.pth")
if self.params.save_mode == "local":
mge.save(state, best_ckpt_name)
elif self.params.save_mode == "oss":
save_dict = pickle.dumps(state)
resp = self.s3_client.put_object(Bucket=self.bucket_name, Key=best_ckpt_name, Body=save_dict[0:])
else:
raise NotImplementedError
self.logger.info("Saved test best checkpoint to: {}".format(best_ckpt_name))
def load_checkpoints(self):
if self.params.save_mode == "local":
state = | mge.load(self.params.restore_file) | megengine.load |
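
As a side note, a minimal sketch of the checkpoint round trip that check_best_save_last_checkpoints and load_checkpoints perform when save_mode == "local"; the Linear model, the SGD optimizer and the file name below are placeholders rather than values taken from this repo:

import megengine as mge
import megengine.module as nn
from megengine.optimizer import SGD

model = nn.Linear(4, 2)                          # stand-in for the real network
optimizer = SGD(model.parameters(), lr=0.1)
state = {"state_dict": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": 0}
mge.save(state, "model_latest.pth")              # local save, as done above
restored = mge.load("model_latest.pth")          # later: restore and push back into the objects
model.load_state_dict(restored["state_dict"])
optimizer.load_state_dict(restored["optimizer"])
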
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import pickle
import boto3
import megengine as mge
from collections import defaultdict
from termcolor import colored
from common import utils
class Manager():
def __init__(self, model, optimizer, params, dataloaders, writer, logger, scheduler):
# params status
self.model = model
self.writer = writer
self.logger = logger
self.params = params
self.optimizer = optimizer
self.scheduler = scheduler
self.dataloaders = dataloaders
# metric_rule should be either Descende or Ascende
self.metric_rule = params.metric_rule
self.epoch = 0
self.step = 0
        # lower is better
if self.metric_rule == "Descende":
self.best_val_score = 100
self.best_test_score = 100
        # higher is better
elif self.metric_rule == "Ascende":
self.best_val_score = 0
self.best_test_score = 0
self.cur_val_score = 0
self.cur_test_score = 0
# train status
self.train_status = defaultdict(utils.AverageMeter)
# val status
self.val_status = defaultdict(utils.AverageMeter)
# test status
self.test_status = defaultdict(utils.AverageMeter)
# model status
self.loss_status = defaultdict(utils.AverageMeter)
# client init
self.s3_client = boto3.client('s3', endpoint_url='http://oss.i.brainpp.cn')
self.bucket_name = params.bucket_name
def update_step(self):
self.step += 1
def update_epoch(self):
self.epoch += 1
def update_loss_status(self, loss, split, bs=None):
if split == "train":
for k, v in loss.items():
bs = self.params.train_batch_size
self.loss_status[k].update(val=v.item(), num=bs)
elif split == "val":
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=bs)
elif split == "test":
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=bs)
else:
raise ValueError("Wrong eval type: {}".format(split))
def update_metric_status(self, metrics, split, bs):
if split == "val":
for k, v in metrics.items():
self.val_status[k].update(val=v.item(), num=bs)
self.cur_val_score = self.val_status[self.params.major_metric].avg
elif split == "test":
for k, v in metrics.items():
self.test_status[k].update(val=v.item(), num=bs)
self.cur_test_score = self.test_status[self.params.major_metric].avg
else:
raise ValueError("Wrong eval type: {}".format(split))
def reset_loss_status(self):
for k, v in self.loss_status.items():
self.loss_status[k].reset()
def reset_metric_status(self, split):
if split == "val":
for k, v in self.val_status.items():
self.val_status[k].reset()
elif split == "test":
for k, v in self.test_status.items():
self.test_status[k].reset()
else:
raise ValueError("Wrong eval type: {}".format(split))
def print_train_info(self):
exp_name = self.params.model_dir.split('/')[-1]
print_str = "{} Epoch: {:4d}, lr={:.6f} ".format(exp_name, self.epoch, self.scheduler.get_lr()[0])
print_str += "total loss: %.4f(%.4f)" % (self.loss_status['total'].val, self.loss_status['total'].avg)
return print_str
def print_metrics(self, split, title="Eval", color="red"):
if split == "val":
metric_status = self.val_status
elif split == "test":
metric_status = self.test_status
else:
raise ValueError("Wrong eval type: {}".format(split))
if split == "val":
print_str = " | ".join("{}: {:.4f} - avg: {:.4f}".format(k, v.val, v.avg) for k, v in metric_status.items())
print_str += " | lastest: {:.4f} | pre_best: {:.4f}".format(self.cur_val_score, self.best_val_score)
elif split == "test":
print_str = " | ".join("{}: {:.4f} - avg: {:.4f}".format(k, v.val, v.avg) for k, v in metric_status.items())
self.logger.info(colored("{} Results: {}".format(title, print_str), color, attrs=["bold"]))
def check_best_save_last_checkpoints(self, latest_freq=5):
state = {
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"step": self.step,
"epoch": self.epoch,
}
if "val" in self.dataloaders:
state["best_val_score"] = self.best_val_score
if "test" in self.dataloaders:
state["best_test_score"] = self.best_test_score
# save latest checkpoint
if self.epoch % latest_freq == 0:
latest_ckpt_name = os.path.join(self.params.model_dir, "model_latest.pth")
if self.params.save_mode == "local":
# print(state)
| mge.save(state, latest_ckpt_name) | megengine.save |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import pickle
import boto3
import megengine as mge
from collections import defaultdict
from termcolor import colored
from common import utils
class Manager():
def __init__(self, model, optimizer, params, dataloaders, writer, logger, scheduler):
# params status
self.model = model
self.writer = writer
self.logger = logger
self.params = params
self.optimizer = optimizer
self.scheduler = scheduler
self.dataloaders = dataloaders
# metric_rule should be either Descende or Ascende
self.metric_rule = params.metric_rule
self.epoch = 0
self.step = 0
        # lower is better
if self.metric_rule == "Descende":
self.best_val_score = 100
self.best_test_score = 100
        # higher is better
elif self.metric_rule == "Ascende":
self.best_val_score = 0
self.best_test_score = 0
self.cur_val_score = 0
self.cur_test_score = 0
# train status
self.train_status = defaultdict(utils.AverageMeter)
# val status
self.val_status = defaultdict(utils.AverageMeter)
# test status
self.test_status = defaultdict(utils.AverageMeter)
# model status
self.loss_status = defaultdict(utils.AverageMeter)
# client init
self.s3_client = boto3.client('s3', endpoint_url='http://oss.i.brainpp.cn')
self.bucket_name = params.bucket_name
def update_step(self):
self.step += 1
def update_epoch(self):
self.epoch += 1
def update_loss_status(self, loss, split, bs=None):
if split == "train":
for k, v in loss.items():
bs = self.params.train_batch_size
self.loss_status[k].update(val=v.item(), num=bs)
elif split == "val":
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=bs)
elif split == "test":
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=bs)
else:
raise ValueError("Wrong eval type: {}".format(split))
def update_metric_status(self, metrics, split, bs):
if split == "val":
for k, v in metrics.items():
self.val_status[k].update(val=v.item(), num=bs)
self.cur_val_score = self.val_status[self.params.major_metric].avg
elif split == "test":
for k, v in metrics.items():
self.test_status[k].update(val=v.item(), num=bs)
self.cur_test_score = self.test_status[self.params.major_metric].avg
else:
raise ValueError("Wrong eval type: {}".format(split))
def reset_loss_status(self):
for k, v in self.loss_status.items():
self.loss_status[k].reset()
def reset_metric_status(self, split):
if split == "val":
for k, v in self.val_status.items():
self.val_status[k].reset()
elif split == "test":
for k, v in self.test_status.items():
self.test_status[k].reset()
else:
raise ValueError("Wrong eval type: {}".format(split))
def print_train_info(self):
exp_name = self.params.model_dir.split('/')[-1]
print_str = "{} Epoch: {:4d}, lr={:.6f} ".format(exp_name, self.epoch, self.scheduler.get_lr()[0])
print_str += "total loss: %.4f(%.4f)" % (self.loss_status['total'].val, self.loss_status['total'].avg)
return print_str
def print_metrics(self, split, title="Eval", color="red"):
if split == "val":
metric_status = self.val_status
elif split == "test":
metric_status = self.test_status
else:
raise ValueError("Wrong eval type: {}".format(split))
if split == "val":
print_str = " | ".join("{}: {:.4f} - avg: {:.4f}".format(k, v.val, v.avg) for k, v in metric_status.items())
print_str += " | lastest: {:.4f} | pre_best: {:.4f}".format(self.cur_val_score, self.best_val_score)
elif split == "test":
print_str = " | ".join("{}: {:.4f} - avg: {:.4f}".format(k, v.val, v.avg) for k, v in metric_status.items())
self.logger.info(colored("{} Results: {}".format(title, print_str), color, attrs=["bold"]))
def check_best_save_last_checkpoints(self, latest_freq=5):
state = {
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"step": self.step,
"epoch": self.epoch,
}
if "val" in self.dataloaders:
state["best_val_score"] = self.best_val_score
if "test" in self.dataloaders:
state["best_test_score"] = self.best_test_score
# save latest checkpoint
if self.epoch % latest_freq == 0:
latest_ckpt_name = os.path.join(self.params.model_dir, "model_latest.pth")
if self.params.save_mode == "local":
# print(state)
mge.save(state, latest_ckpt_name)
elif self.params.save_mode == "oss":
save_dict = pickle.dumps(state)
resp = self.s3_client.put_object(Bucket=self.bucket_name, Key=latest_ckpt_name, Body=save_dict[0:])
else:
raise NotImplementedError
self.logger.info("Saved latest checkpoint to: {}".format(latest_ckpt_name))
        # save the latest val metrics and check whether this is the best val checkpoint
if "val" in self.dataloaders:
val_latest_metrics_name = os.path.join(self.params.model_dir, "val_metrics_latest.json")
utils.save_dict_to_json(self.val_status, val_latest_metrics_name)
            # lower is better
if self.metric_rule == "Descende":
is_best = self.cur_val_score < self.best_val_score
            # higher is better
elif self.metric_rule == "Ascende":
is_best = self.cur_val_score > self.best_val_score
else:
raise Exception("metric_rule should be either Descende or Ascende")
if is_best:
# save metrics
self.best_val_score = self.cur_val_score
best_metrics_name = os.path.join(self.params.model_dir, "val_metrics_best.json")
utils.save_dict_to_json(self.val_status, best_metrics_name)
self.logger.info("Current is val best, score={:.4f}".format(self.best_val_score))
# save checkpoint
best_ckpt_name = os.path.join(self.params.model_dir, "val_model_best.pth")
if self.params.save_mode == "local":
| mge.save(state, best_ckpt_name) | megengine.save |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import pickle
import boto3
import megengine as mge
from collections import defaultdict
from termcolor import colored
from common import utils
class Manager():
def __init__(self, model, optimizer, params, dataloaders, writer, logger, scheduler):
# params status
self.model = model
self.writer = writer
self.logger = logger
self.params = params
self.optimizer = optimizer
self.scheduler = scheduler
self.dataloaders = dataloaders
# metric_rule should be either Descende or Ascende
self.metric_rule = params.metric_rule
self.epoch = 0
self.step = 0
        # lower is better
if self.metric_rule == "Descende":
self.best_val_score = 100
self.best_test_score = 100
        # higher is better
elif self.metric_rule == "Ascende":
self.best_val_score = 0
self.best_test_score = 0
self.cur_val_score = 0
self.cur_test_score = 0
# train status
self.train_status = defaultdict(utils.AverageMeter)
# val status
self.val_status = defaultdict(utils.AverageMeter)
# test status
self.test_status = defaultdict(utils.AverageMeter)
# model status
self.loss_status = defaultdict(utils.AverageMeter)
# client init
self.s3_client = boto3.client('s3', endpoint_url='http://oss.i.brainpp.cn')
self.bucket_name = params.bucket_name
def update_step(self):
self.step += 1
def update_epoch(self):
self.epoch += 1
def update_loss_status(self, loss, split, bs=None):
if split == "train":
for k, v in loss.items():
bs = self.params.train_batch_size
self.loss_status[k].update(val=v.item(), num=bs)
elif split == "val":
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=bs)
elif split == "test":
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=bs)
else:
raise ValueError("Wrong eval type: {}".format(split))
def update_metric_status(self, metrics, split, bs):
if split == "val":
for k, v in metrics.items():
self.val_status[k].update(val=v.item(), num=bs)
self.cur_val_score = self.val_status[self.params.major_metric].avg
elif split == "test":
for k, v in metrics.items():
self.test_status[k].update(val=v.item(), num=bs)
self.cur_test_score = self.test_status[self.params.major_metric].avg
else:
raise ValueError("Wrong eval type: {}".format(split))
def reset_loss_status(self):
for k, v in self.loss_status.items():
self.loss_status[k].reset()
def reset_metric_status(self, split):
if split == "val":
for k, v in self.val_status.items():
self.val_status[k].reset()
elif split == "test":
for k, v in self.test_status.items():
self.test_status[k].reset()
else:
raise ValueError("Wrong eval type: {}".format(split))
def print_train_info(self):
exp_name = self.params.model_dir.split('/')[-1]
print_str = "{} Epoch: {:4d}, lr={:.6f} ".format(exp_name, self.epoch, self.scheduler.get_lr()[0])
print_str += "total loss: %.4f(%.4f)" % (self.loss_status['total'].val, self.loss_status['total'].avg)
return print_str
def print_metrics(self, split, title="Eval", color="red"):
if split == "val":
metric_status = self.val_status
elif split == "test":
metric_status = self.test_status
else:
raise ValueError("Wrong eval type: {}".format(split))
if split == "val":
print_str = " | ".join("{}: {:.4f} - avg: {:.4f}".format(k, v.val, v.avg) for k, v in metric_status.items())
print_str += " | lastest: {:.4f} | pre_best: {:.4f}".format(self.cur_val_score, self.best_val_score)
elif split == "test":
print_str = " | ".join("{}: {:.4f} - avg: {:.4f}".format(k, v.val, v.avg) for k, v in metric_status.items())
self.logger.info(colored("{} Results: {}".format(title, print_str), color, attrs=["bold"]))
def check_best_save_last_checkpoints(self, latest_freq=5):
state = {
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"step": self.step,
"epoch": self.epoch,
}
if "val" in self.dataloaders:
state["best_val_score"] = self.best_val_score
if "test" in self.dataloaders:
state["best_test_score"] = self.best_test_score
# save latest checkpoint
if self.epoch % latest_freq == 0:
latest_ckpt_name = os.path.join(self.params.model_dir, "model_latest.pth")
if self.params.save_mode == "local":
# print(state)
mge.save(state, latest_ckpt_name)
elif self.params.save_mode == "oss":
save_dict = pickle.dumps(state)
resp = self.s3_client.put_object(Bucket=self.bucket_name, Key=latest_ckpt_name, Body=save_dict[0:])
else:
raise NotImplementedError
self.logger.info("Saved latest checkpoint to: {}".format(latest_ckpt_name))
# save val latest metrics, and check if val is best checkpoints
if "val" in self.dataloaders:
val_latest_metrics_name = os.path.join(self.params.model_dir, "val_metrics_latest.json")
utils.save_dict_to_json(self.val_status, val_latest_metrics_name)
# lower is better
if self.metric_rule == "Descende":
is_best = self.cur_val_score < self.best_val_score
# higher is better
elif self.metric_rule == "Ascende":
is_best = self.cur_val_score > self.best_val_score
else:
raise Exception("metric_rule should be either Descende or Ascende")
if is_best:
# save metrics
self.best_val_score = self.cur_val_score
best_metrics_name = os.path.join(self.params.model_dir, "val_metrics_best.json")
utils.save_dict_to_json(self.val_status, best_metrics_name)
self.logger.info("Current is val best, score={:.4f}".format(self.best_val_score))
# save checkpoint
best_ckpt_name = os.path.join(self.params.model_dir, "val_model_best.pth")
if self.params.save_mode == "local":
mge.save(state, best_ckpt_name)
elif self.params.save_mode == "oss":
save_dict = pickle.dumps(state)
resp = self.s3_client.put_object(Bucket=self.bucket_name, Key=best_ckpt_name, Body=save_dict[0:])
else:
raise NotImplementedError
self.logger.info("Saved val best checkpoint to: {}".format(best_ckpt_name))
# save test latest metrics, and check if test is best checkpoints
# if self.dataloaders["test"] is not None:
if "test" in self.dataloaders:
test_latest_metrics_name = os.path.join(self.params.model_dir, "test_metrics_latest.json")
utils.save_dict_to_json(self.test_status, test_latest_metrics_name)
# lower is better
if self.metric_rule == "Descende":
is_best = self.cur_test_score < self.best_test_score
# higher is better
elif self.metric_rule == "Ascende":
is_best = self.cur_test_score > self.best_test_score
else:
raise Exception("metric_rule should be either Descende or Ascende")
if is_best:
# save metrics
self.best_test_score = self.cur_test_score
best_metrics_name = os.path.join(self.params.model_dir, "test_metrics_best.json")
utils.save_dict_to_json(self.test_status, best_metrics_name)
self.logger.info("Current is test best, score={:.4f}".format(self.best_test_score))
# save checkpoint
best_ckpt_name = os.path.join(self.params.model_dir, "test_model_best.pth")
if self.params.save_mode == "local":
| mge.save(state, best_ckpt_name) | megengine.save |
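The row above completes the local-save branch with megengine.save; the surrounding class can also push the same state dict to OSS via pickle and boto3. A minimal sketch of that dual-path save, factored into a hypothetical save_checkpoint helper (the helper name and its arguments are illustrative, not part of the source):

import pickle
import megengine as mge

def save_checkpoint(state, path, save_mode="local", s3_client=None, bucket=None):
    # state is a plain dict holding state_dicts plus step/epoch counters
    if save_mode == "local":
        mge.save(state, path)  # serialize straight to a local .pth file
    elif save_mode == "oss":
        body = pickle.dumps(state)  # serialize to bytes, then upload
        s3_client.put_object(Bucket=bucket, Key=path, Body=body)
    else:
        raise NotImplementedError(save_mode)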
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
from typing import Tuple, Union
import megengine._internal as mgb
from ... import module as Float
from ...core import Parameter
from ...functional import conv_bias_activation
from ...module import Conv2d
from ...quantization.utils import register_method_to_class
class _ConvBnActivation2d(Conv2d):
r"""Applies a 2D convolution over an quantized input tensor, inference only.
The parameter is same with :class: `~.Conv2d`
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode: str = "CROSS_CORRELATION",
compute_mode: str = "DEFAULT",
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
True,
conv_mode,
compute_mode,
)
self.scale = 1.0
self.zero_point = 0.0
self.output_dtype = | mgb.dtype.qint8(self.scale) | megengine._internal.dtype.qint8 |
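This row builds the module's output dtype from its scale using the legacy megengine._internal dtype helpers. A small sketch of the same pattern, restricted to the calls that appear in these rows (qint8, qint32, get_scale); treat it as a sketch against that old API rather than current MegEngine:

import megengine._internal as mgb

scale = 1.0
out_dtype = mgb.dtype.qint8(scale)      # int8 dtype that carries the quantization scale
bias_dtype = mgb.dtype.qint32(scale)    # 32-bit dtype used for the bias accumulator
print(mgb.dtype.get_scale(out_dtype))   # reads the scale back from a quantized dtype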
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
from typing import Tuple, Union
import megengine._internal as mgb
from ... import module as Float
from ...core import Parameter
from ...functional import conv_bias_activation
from ...module import Conv2d
from ...quantization.utils import register_method_to_class
class _ConvBnActivation2d(Conv2d):
r"""Applies a 2D convolution over an quantized input tensor, inference only.
The parameter is same with :class: `~.Conv2d`
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode: str = "CROSS_CORRELATION",
compute_mode: str = "DEFAULT",
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
True,
conv_mode,
compute_mode,
)
self.scale = 1.0
self.zero_point = 0.0
self.output_dtype = mgb.dtype.qint8(self.scale)
self.weight = self.weight.astype(self.output_dtype)
self.bias = self.bias.astype(mgb.dtype.qint32(self.scale))
def calc_conv_quantized(self, inp, nonlinear_mode="IDENTITY"):
inp_scale = | mgb.dtype.get_scale(inp.dtype) | megengine._internal.dtype.get_scale |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
from typing import Tuple, Union
import megengine._internal as mgb
from ... import module as Float
from ...core import Parameter
from ...functional import conv_bias_activation
from ...module import Conv2d
from ...quantization.utils import register_method_to_class
class _ConvBnActivation2d(Conv2d):
r"""Applies a 2D convolution over an quantized input tensor, inference only.
The parameter is same with :class: `~.Conv2d`
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode: str = "CROSS_CORRELATION",
compute_mode: str = "DEFAULT",
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
True,
conv_mode,
compute_mode,
)
self.scale = 1.0
self.zero_point = 0.0
self.output_dtype = mgb.dtype.qint8(self.scale)
self.weight = self.weight.astype(self.output_dtype)
self.bias = self.bias.astype(mgb.dtype.qint32(self.scale))
def calc_conv_quantized(self, inp, nonlinear_mode="IDENTITY"):
inp_scale = mgb.dtype.get_scale(inp.dtype)
w_scale = | mgb.dtype.get_scale(self.weight.dtype) | megengine._internal.dtype.get_scale |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
from typing import Tuple, Union
import megengine._internal as mgb
from ... import module as Float
from ...core import Parameter
from ...functional import conv_bias_activation
from ...module import Conv2d
from ...quantization.utils import register_method_to_class
class _ConvBnActivation2d(Conv2d):
r"""Applies a 2D convolution over an quantized input tensor, inference only.
The parameter is same with :class: `~.Conv2d`
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode: str = "CROSS_CORRELATION",
compute_mode: str = "DEFAULT",
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
True,
conv_mode,
compute_mode,
)
self.scale = 1.0
self.zero_point = 0.0
self.output_dtype = mgb.dtype.qint8(self.scale)
self.weight = self.weight.astype(self.output_dtype)
self.bias = self.bias.astype( | mgb.dtype.qint32(self.scale) | megengine._internal.dtype.qint32 |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
from typing import Tuple, Union
import megengine._internal as mgb
from ... import module as Float
from ...core import Parameter
from ...functional import conv_bias_activation
from ...module import Conv2d
from ...quantization.utils import register_method_to_class
class _ConvBnActivation2d(Conv2d):
r"""Applies a 2D convolution over an quantized input tensor, inference only.
The parameter is same with :class: `~.Conv2d`
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode: str = "CROSS_CORRELATION",
compute_mode: str = "DEFAULT",
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
True,
conv_mode,
compute_mode,
)
self.scale = 1.0
self.zero_point = 0.0
self.output_dtype = mgb.dtype.qint8(self.scale)
self.weight = self.weight.astype(self.output_dtype)
self.bias = self.bias.astype(mgb.dtype.qint32(self.scale))
def calc_conv_quantized(self, inp, nonlinear_mode="IDENTITY"):
inp_scale = mgb.dtype.get_scale(inp.dtype)
w_scale = mgb.dtype.get_scale(self.weight.dtype)
bias_scale = inp_scale * w_scale
return conv_bias_activation(
inp,
self.weight,
self.bias.astype( | mgb.dtype.qint32(bias_scale) | megengine._internal.dtype.qint32 |
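The bias passed to conv_bias_activation is re-cast to a qint32 dtype whose scale is the product of the input and weight scales, so the int32 accumulator and the rescaled bias agree. A tiny illustration of that relationship (the numbers are made up):

inp_scale = 0.25                    # scale carried by the qint8 input dtype
w_scale = 0.5                       # scale carried by the qint8 weight dtype
bias_scale = inp_scale * w_scale    # the qint32 bias dtype must use the product of the two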
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from megengine import tensor
from megengine.test import assertTensorClose
def test_reshape_tuple():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(inp.shape)
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_asterisk():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(*inp.shape)
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_shapeof():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(inp.shapeof())
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_tensor():
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape( | tensor([4, 4]) | megengine.tensor |
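These tests exercise the different shape arguments reshape accepts; the row above passes the target shape as a tensor. A self-contained sketch of that call, with plain asserts instead of assertTensorClose (which comes from the old megengine.test helper):

import numpy as np
from megengine import tensor

x = tensor(np.arange(16, dtype=np.int32).reshape(1, 16))
y = x.reshape(tensor([4, 4]))    # the target shape may itself be a tensor
assert y.numpy().shape == (4, 4)
assert np.array_equal(y.numpy().ravel(), np.arange(16, dtype=np.int32))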
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from megengine import tensor
from megengine.test import assertTensorClose
def test_reshape_tuple():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(inp.shape)
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_asterisk():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(*inp.shape)
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_shapeof():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(inp.shapeof())
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_tensor():
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(tensor([4, 4]))
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_tensor_fused():
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape( | tensor([4, 4]) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from megengine import tensor
from megengine.test import assertTensorClose
def test_reshape_tuple():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(inp.shape)
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_asterisk():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(*inp.shape)
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_shapeof():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(inp.shapeof())
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_tensor():
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(tensor([4, 4]))
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_tensor_fused():
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(tensor([4, 4]), 1)
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4, 1))
def test_reshape_fused():
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape( | tensor(2) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from megengine import tensor
from megengine.test import assertTensorClose
def test_reshape_tuple():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(inp.shape)
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_asterisk():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(*inp.shape)
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_shapeof():
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4, 4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(inp.shapeof())
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_tensor():
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(tensor([4, 4]))
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4))
def test_reshape_tensor_fused():
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(tensor([4, 4]), 1)
assertTensorClose(out.numpy(), np.arange(100, 116, dtype=np.int32).reshape(4, 4, 1))
def test_reshape_fused():
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1, 16))
out = out.reshape(tensor(2), 2, | tensor(4) | megengine.tensor |
import sys
sys.path.append('.')
import time
import megengine as mge
from model.RIFE import Model
model = Model()
model.eval()
I0 = | mge.random(1, 3, 480, 640) | megengine.random |
import sys
sys.path.append('.')
import time
import megengine as mge
from model.RIFE import Model
model = Model()
model.eval()
I0 = mge.random(1, 3, 480, 640)
I1 = | mge.random(1, 3, 480, 640) | megengine.random |
import sys
sys.path.append('.')
import time
import megengine as mge
from model.RIFE import Model
model = Model()
model.eval()
I0 = mge.random(1, 3, 480, 640)
I1 = mge.random(1, 3, 480, 640)
for i in range(100):
pred = model.inference(I0, I1)
| mge._full_sync() | megengine._full_sync |
import sys
sys.path.append('.')
import time
import megengine as mge
from model.RIFE import Model
model = Model()
model.eval()
I0 = mge.random(1, 3, 480, 640)
I1 = mge.random(1, 3, 480, 640)
for i in range(100):
pred = model.inference(I0, I1)
mge._full_sync()
time_stamp = time.time()
for i in range(100):
pred = model.inference(I0, I1)
| mge._full_sync() | megengine._full_sync |
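The four rows above form a small latency benchmark: warm-up runs, a full device sync, then timed runs bracketed by another sync. A sketch of the same pattern wrapped in a hypothetical benchmark helper (the helper and the runs-per-second arithmetic are illustrative):

import time
import megengine as mge

def benchmark(run, iters=100):
    for _ in range(iters):
        run()                   # warm-up so one-off costs do not skew the timing
    mge._full_sync()            # drain queued kernels before starting the clock
    start = time.time()
    for _ in range(iters):
        run()
    mge._full_sync()            # make sure all timed work has actually finished
    return iters / (time.time() - start)   # runs per second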
# Copyright (c) Megvii, Inc. and its affiliates.
import megengine.functional as F
import megengine.module as M
from .head import get_head
from .loss import get_loss
from .resnet import get_backbone
from .stn import STN
class FaceRecognitionModel(M.Module):
"""combination of all building blocks, including backbone, head and loss
"""
def __init__(self, configs):
"""initialize with configuration
Args:
configs (dict): configuration, required fields include:
backbone: custom name of backbone
output_head: custom name of output head
feature_dim: dimension number of output embedding
loss_type: custom name of loss function
num_class: classification number of dataset
loss_scale: used in loss function
loss_m1: used in loss function
loss_m2: used in loss function
loss_m3: used in loss function
use_stn: whether or not to use stn
"""
super().__init__()
backbone_constructor = get_backbone(configs["backbone"])
self.backbone = backbone_constructor()
head_constructor = get_head(configs["output_head"])
self.head = head_constructor(feature_dim=configs["feature_dim"], channel=self.backbone.output_channel)
metric_constructor = get_loss(configs["loss_type"])
self.metric = metric_constructor(
num_class=configs["num_class"],
scale=configs["loss_scale"],
m1=configs["loss_m1"],
m2=configs["loss_m2"],
m3=configs["loss_m3"],
feature_dim=configs["feature_dim"],
)
if configs["use_stn"]:
self.stn = STN()
self.use_stn = True
else:
self.use_stn = False
def forward_embedding_only(self, images):
"""run forward pass without calculating loss, expected useful during evaluation.
Args:
images (Tensor): preprocessed images (shape: n * 3 * 112 * 112)
Returns:
embedding (Tensor): embedding
"""
if self.use_stn:
images = self.stn(images)
feature_map = self.backbone(images)
embedding = self.head(feature_map)
embedding = | F.normalize(embedding, axis=1) | megengine.functional.normalize |
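The completion L2-normalizes each embedding along axis 1 so downstream cosine/margin losses operate on unit vectors. A small sketch of that call:

import numpy as np
import megengine.functional as F
from megengine import tensor

emb = tensor(np.random.randn(4, 512).astype("float32"))
emb = F.normalize(emb, axis=1)    # each row becomes a unit-length embedding
print((emb ** 2).sum(axis=1))     # ~1.0 per sample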
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = | mge.tensor(x, dtype="float32") | megengine.tensor |
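test_tqt (and the other tests in this file) records gradients with megengine's Grad context: register the tensors of interest with wrt plus a callback, run the forward pass, then call grad(y, dy) with an explicit output gradient. A minimal sketch of that recording pattern on a toy function:

import numpy as np
import megengine as mge
from megengine.core.autodiff.grad import Grad

grads = []
x = mge.tensor(np.arange(4, dtype="float32"))
with Grad() as grad:
    grad.wrt(x, callback=grads.append)                  # the callback receives dL/dx during backward
    y = x * 2.0
    grad(y, mge.tensor(np.ones(4, dtype="float32")))    # backprop with dy supplied explicitly
print(grads[0].numpy())                                 # -> [2. 2. 2. 2.]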
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = | mge.tensor(s, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = | mge.tensor(g_y, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = | QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax) | megengine.core.tensor.dtype.QuantDtypeMeta |
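QuantDtypeMeta lets the test define a custom integer range instead of the stock qint8 limits; create_qparams then bundles it with a scale and zero point for fake_quant_tensor. A sketch reusing the same arguments as the test (ASYMMERTIC is the enum member name as spelled in this codebase):

import numpy as np
from megengine import tensor
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.quantization.utils import QuantMode, create_qparams, fake_quant_tensor

qmin, qmax = -126, 129
dt = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
qparams = create_qparams(QuantMode.ASYMMERTIC, dt,
                         tensor([4.0], dtype=np.float32),     # scale
                         tensor([1.0], dtype=np.float32))     # zero point
x = tensor(np.random.uniform(-512.0, 512.0, (2, 8)).astype("float32"))
y = fake_quant_tensor(x, qparams)    # values snap onto the asymmetric integer grid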
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = fake_quant_tensor(x, qparams).numpy()
assert np.isnan(y).all()
zero_point = | tensor([1.0], dtype=np.float32) | megengine.tensor |
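test_fakequant runs twice (visible in the later rows): once with scalar per-tensor qparams and once with (1, 32, 1, 1)-shaped per-channel qparams that broadcast over the channel axis of the (1, 32, 32, 32) input. The two parameter shapes, side by side:

import numpy as np
from megengine import tensor

per_tensor_scale = tensor([4.0], dtype=np.float32)                           # one scale for the whole tensor
per_channel_scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)   # one scale per channel, broadcastable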
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = fake_quant_tensor(x, qparams).numpy()
assert np.isnan(y).all()
zero_point = tensor([1.0], dtype=np.float32)
scale = | tensor([4.0], dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = fake_quant_tensor(x, qparams).numpy()
assert np.isnan(y).all()
zero_point = tensor([1.0], dtype=np.float32)
scale = tensor([4.0], dtype=np.float32)
run(zero_point, scale)
zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
run(zero_point, scale)
class LSQ_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale, zero_point, grad_scale):
inp_scaled = inp / scale + zero_point
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.floor(inp_clipped + 0.5)
inp_flq = (inp_rounded - zero_point) * scale
self.saved_tensors = (inp_scaled, inp_rounded, scale, grad_scale)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, scale, grad_scale) = self.saved_tensors
ind_small = inp_scaled < self.lowerbound
ind_big = inp_scaled > self.upperbound
ind_middle = np.logical_xor(ind_small, ind_big)
ind_middle = np.abs(ind_middle - 1)
grad_s = (
ind_small * self.lowerbound
+ ind_big * self.upperbound
+ ind_middle * (-inp_scaled + inp_rounded)
)
grad_s = grad_s * grad_scale * grad_inp_flq
grad_s = grad_s.sum()
grad_inp = grad_inp_flq * ind_middle
return grad_inp, grad_s
def test_lsq():
g = []
def cb(grad):
g.append(grad)
# FIXME: use random number when LSQ is fixed
# x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
# s = np.random.rand(1)
x = np.array(
[
[
[
[4.0, 38.0, -121.0, 38.0],
[15.0, -115.0, -112.0, 24.0],
[23.0, -65.0, 109.0, -115.0],
],
[
[-66.0, -90.0, -45.0, -101.0],
[68.0, -98.0, 108.0, -79.0],
[54.0, 63.0, -10.0, -50.0],
],
]
],
dtype="float32",
)
s = np.array([0.02918224], dtype="float32")
eps = np.array([1e-5], dtype="float32")
s = np.abs(s) if np.abs(s) > eps else eps
zero_point = np.array([1.0], dtype="float32")
grad_s = np.array([2.0], dtype="float32")
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = LSQ_numpy(-127, 127)
y_np = n.forward(x, s, zero_point, grad_s)
g_x_np, g_s_np = n.backward(g_y)
x = | mge.tensor(x, dtype="float32") | megengine.tensor |
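LSQ_numpy above is the reference for lsq_forward: scale and shift the input, clip to the integer range, round half-up, then map back. A small numpy-only sketch of that forward pass (the sample values are illustrative):

import numpy as np

inp = np.array([4.0, -121.0], dtype="float32")
scale, zero_point = 0.02918224, 1.0
lower, upper = -127, 127
q = np.clip(inp / scale + zero_point, lower, upper)   # scale, shift, clip to the quantized range
q = np.floor(q + 0.5)                                 # round-half-up, matching the reference class
out = (q - zero_point) * scale                        # back to the float domain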
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = fake_quant_tensor(x, qparams).numpy()
assert np.isnan(y).all()
zero_point = tensor([1.0], dtype=np.float32)
scale = tensor([4.0], dtype=np.float32)
run(zero_point, scale)
zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
run(zero_point, scale)
class LSQ_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale, zero_point, grad_scale):
inp_scaled = inp / scale + zero_point
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.floor(inp_clipped + 0.5)
inp_flq = (inp_rounded - zero_point) * scale
self.saved_tensors = (inp_scaled, inp_rounded, scale, grad_scale)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, scale, grad_scale) = self.saved_tensors
ind_small = inp_scaled < self.lowerbound
ind_big = inp_scaled > self.upperbound
ind_middle = np.logical_xor(ind_small, ind_big)
ind_middle = np.abs(ind_middle - 1)
grad_s = (
ind_small * self.lowerbound
+ ind_big * self.upperbound
+ ind_middle * (-inp_scaled + inp_rounded)
)
grad_s = grad_s * grad_scale * grad_inp_flq
grad_s = grad_s.sum()
grad_inp = grad_inp_flq * ind_middle
return grad_inp, grad_s
def test_lsq():
g = []
def cb(grad):
g.append(grad)
# FIXME: use random number when LSQ is fixed
# x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
# s = np.random.rand(1)
x = np.array(
[
[
[
[4.0, 38.0, -121.0, 38.0],
[15.0, -115.0, -112.0, 24.0],
[23.0, -65.0, 109.0, -115.0],
],
[
[-66.0, -90.0, -45.0, -101.0],
[68.0, -98.0, 108.0, -79.0],
[54.0, 63.0, -10.0, -50.0],
],
]
],
dtype="float32",
)
s = np.array([0.02918224], dtype="float32")
eps = np.array([1e-5], dtype="float32")
s = np.abs(s) if np.abs(s) > eps else eps
zero_point = np.array([1.0], dtype="float32")
grad_s = np.array([2.0], dtype="float32")
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = LSQ_numpy(-127, 127)
y_np = n.forward(x, s, zero_point, grad_s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = | mge.tensor(s, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = fake_quant_tensor(x, qparams).numpy()
assert np.isnan(y).all()
zero_point = tensor([1.0], dtype=np.float32)
scale = tensor([4.0], dtype=np.float32)
run(zero_point, scale)
zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
run(zero_point, scale)
class LSQ_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale, zero_point, grad_scale):
inp_scaled = inp / scale + zero_point
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.floor(inp_clipped + 0.5)
inp_flq = (inp_rounded - zero_point) * scale
self.saved_tensors = (inp_scaled, inp_rounded, scale, grad_scale)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, scale, grad_scale) = self.saved_tensors
ind_small = inp_scaled < self.lowerbound
ind_big = inp_scaled > self.upperbound
ind_middle = np.logical_xor(ind_small, ind_big)
ind_middle = np.abs(ind_middle - 1)
grad_s = (
ind_small * self.lowerbound
+ ind_big * self.upperbound
+ ind_middle * (-inp_scaled + inp_rounded)
)
grad_s = grad_s * grad_scale * grad_inp_flq
grad_s = grad_s.sum()
grad_inp = grad_inp_flq * ind_middle
return grad_inp, grad_s
def test_lsq():
g = []
def cb(grad):
g.append(grad)
# FIXME: use random number when LSQ is fixed
# x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
# s = np.random.rand(1)
x = np.array(
[
[
[
[4.0, 38.0, -121.0, 38.0],
[15.0, -115.0, -112.0, 24.0],
[23.0, -65.0, 109.0, -115.0],
],
[
[-66.0, -90.0, -45.0, -101.0],
[68.0, -98.0, 108.0, -79.0],
[54.0, 63.0, -10.0, -50.0],
],
]
],
dtype="float32",
)
s = np.array([0.02918224], dtype="float32")
eps = np.array([1e-5], dtype="float32")
s = np.abs(s) if np.abs(s) > eps else eps
zero_point = np.array([1.0], dtype="float32")
grad_s = np.array([2.0], dtype="float32")
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = LSQ_numpy(-127, 127)
y_np = n.forward(x, s, zero_point, grad_s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
zero_point = | mge.tensor(zero_point, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = fake_quant_tensor(x, qparams).numpy()
assert np.isnan(y).all()
zero_point = tensor([1.0], dtype=np.float32)
scale = tensor([4.0], dtype=np.float32)
run(zero_point, scale)
zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
run(zero_point, scale)
class LSQ_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale, zero_point, grad_scale):
inp_scaled = inp / scale + zero_point
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.floor(inp_clipped + 0.5)
inp_flq = (inp_rounded - zero_point) * scale
self.saved_tensors = (inp_scaled, inp_rounded, scale, grad_scale)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, scale, grad_scale) = self.saved_tensors
ind_small = inp_scaled < self.lowerbound
ind_big = inp_scaled > self.upperbound
ind_middle = np.logical_xor(ind_small, ind_big)
ind_middle = np.abs(ind_middle - 1)
grad_s = (
ind_small * self.lowerbound
+ ind_big * self.upperbound
+ ind_middle * (-inp_scaled + inp_rounded)
)
grad_s = grad_s * grad_scale * grad_inp_flq
grad_s = grad_s.sum()
grad_inp = grad_inp_flq * ind_middle
return grad_inp, grad_s
def test_lsq():
g = []
def cb(grad):
g.append(grad)
# FIXME: use random number when LSQ is fixed
# x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
# s = np.random.rand(1)
x = np.array(
[
[
[
[4.0, 38.0, -121.0, 38.0],
[15.0, -115.0, -112.0, 24.0],
[23.0, -65.0, 109.0, -115.0],
],
[
[-66.0, -90.0, -45.0, -101.0],
[68.0, -98.0, 108.0, -79.0],
[54.0, 63.0, -10.0, -50.0],
],
]
],
dtype="float32",
)
s = np.array([0.02918224], dtype="float32")
eps = np.array([1e-5], dtype="float32")
s = np.abs(s) if np.abs(s) > eps else eps
zero_point = np.array([1.0], dtype="float32")
grad_s = np.array([2.0], dtype="float32")
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = LSQ_numpy(-127, 127)
y_np = n.forward(x, s, zero_point, grad_s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
zero_point = mge.tensor(zero_point, dtype="float32")
grad_s = | mge.tensor(grad_s, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = fake_quant_tensor(x, qparams).numpy()
assert np.isnan(y).all()
zero_point = tensor([1.0], dtype=np.float32)
scale = tensor([4.0], dtype=np.float32)
run(zero_point, scale)
zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
run(zero_point, scale)
class LSQ_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale, zero_point, grad_scale):
inp_scaled = inp / scale + zero_point
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.floor(inp_clipped + 0.5)
inp_flq = (inp_rounded - zero_point) * scale
self.saved_tensors = (inp_scaled, inp_rounded, scale, grad_scale)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, scale, grad_scale) = self.saved_tensors
ind_small = inp_scaled < self.lowerbound
ind_big = inp_scaled > self.upperbound
ind_middle = np.logical_xor(ind_small, ind_big)
ind_middle = np.abs(ind_middle - 1)
grad_s = (
ind_small * self.lowerbound
+ ind_big * self.upperbound
+ ind_middle * (-inp_scaled + inp_rounded)
)
grad_s = grad_s * grad_scale * grad_inp_flq
grad_s = grad_s.sum()
grad_inp = grad_inp_flq * ind_middle
return grad_inp, grad_s
def test_lsq():
g = []
def cb(grad):
g.append(grad)
# FIXME: use random number when LSQ is fixed
# x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
# s = np.random.rand(1)
x = np.array(
[
[
[
[4.0, 38.0, -121.0, 38.0],
[15.0, -115.0, -112.0, 24.0],
[23.0, -65.0, 109.0, -115.0],
],
[
[-66.0, -90.0, -45.0, -101.0],
[68.0, -98.0, 108.0, -79.0],
[54.0, 63.0, -10.0, -50.0],
],
]
],
dtype="float32",
)
s = np.array([0.02918224], dtype="float32")
eps = np.array([1e-5], dtype="float32")
s = np.abs(s) if np.abs(s) > eps else eps
zero_point = np.array([1.0], dtype="float32")
grad_s = np.array([2.0], dtype="float32")
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = LSQ_numpy(-127, 127)
y_np = n.forward(x, s, zero_point, grad_s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
zero_point = mge.tensor(zero_point, dtype="float32")
grad_s = mge.tensor(grad_s, dtype="float32")
g_y = | mge.tensor(g_y, dtype="float32") | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = | tqt_forward(-127, 127, x, s) | megengine.quantization.utils.tqt_forward |
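The chain-rule factor in TQT_numpy.backward above, dL/ds = dL/dt * t * ln(2), is simply the derivative of t = 2**s with respect to s. A minimal numpy check of that identity (a sketch, nothing beyond numpy is assumed):
import numpy as np
s, eps = -0.3, 1e-6
t = 2.0 ** s
numeric = (2.0 ** (s + eps) - 2.0 ** (s - eps)) / (2 * eps)  # central difference of t(s)
assert np.isclose(numeric, t * np.log(2), rtol=1e-6)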
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return | F.round(x) | megengine.functional.round |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum( | F.maximum(oup, qmin) | megengine.functional.maximum |
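Because Round.backward above returns output_grads unchanged (a straight-through estimator), the input gradient of this fake-quant reference reduces to a 0/1 mask on whether inp / scale + zero_point falls inside [qmin, qmax], which is why the two backward passes in test_fakequant can be compared elementwise. A small numpy sketch of the expected mask, with hand-picked values:
import numpy as np
inp = np.array([-600.0, 0.0, 600.0], dtype=np.float32)
scale, zero_point, qmin, qmax = 4.0, 1.0, -126, 129
q = inp / scale + zero_point                                    # [-149., 1., 151.]
expected_grad = ((q >= qmin) & (q <= qmax)).astype(np.float32)  # [0., 1., 0.]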
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = | create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point) | megengine.quantization.utils.create_qparams |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = | tensor(inp_data, dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = | tensor(inp_data, dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = | tensor(inp_data, dtype=np.float32) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = | F.full((1, 32, 3, 3), np.nan) | megengine.functional.full |
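The # test nan block above expects fake quantization to propagate NaN end to end: dividing, rounding, and clipping a NaN all stay NaN, so the output should be entirely NaN as well. The numpy analogue behaves the same way (scale and zero point here are arbitrary):
import numpy as np
x = np.float32(np.nan)
q = np.clip(np.round(x / 4.0) + 1.0, -126, 129)  # still NaN after round and clip
y = (q - 1.0) * 4.0
assert np.isnan(y)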
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = fake_quant_tensor(x, qparams).numpy()
assert np.isnan(y).all()
zero_point = tensor([1.0], dtype=np.float32)
scale = tensor([4.0], dtype=np.float32)
run(zero_point, scale)
zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
run(zero_point, scale)
class LSQ_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale, zero_point, grad_scale):
inp_scaled = inp / scale + zero_point
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.floor(inp_clipped + 0.5)
inp_flq = (inp_rounded - zero_point) * scale
self.saved_tensors = (inp_scaled, inp_rounded, scale, grad_scale)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, scale, grad_scale) = self.saved_tensors
ind_small = inp_scaled < self.lowerbound
ind_big = inp_scaled > self.upperbound
ind_middle = np.logical_xor(ind_small, ind_big)
ind_middle = np.abs(ind_middle - 1)
grad_s = (
ind_small * self.lowerbound
+ ind_big * self.upperbound
+ ind_middle * (-inp_scaled + inp_rounded)
)
grad_s = grad_s * grad_scale * grad_inp_flq
grad_s = grad_s.sum()
grad_inp = grad_inp_flq * ind_middle
return grad_inp, grad_s
def test_lsq():
g = []
def cb(grad):
g.append(grad)
# FIXME: use random number when LSQ is fixed
# x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
# s = np.random.rand(1)
x = np.array(
[
[
[
[4.0, 38.0, -121.0, 38.0],
[15.0, -115.0, -112.0, 24.0],
[23.0, -65.0, 109.0, -115.0],
],
[
[-66.0, -90.0, -45.0, -101.0],
[68.0, -98.0, 108.0, -79.0],
[54.0, 63.0, -10.0, -50.0],
],
]
],
dtype="float32",
)
s = np.array([0.02918224], dtype="float32")
eps = np.array([1e-5], dtype="float32")
s = np.abs(s) if np.abs(s) > eps else eps
zero_point = np.array([1.0], dtype="float32")
grad_s = np.array([2.0], dtype="float32")
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = LSQ_numpy(-127, 127)
y_np = n.forward(x, s, zero_point, grad_s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
zero_point = mge.tensor(zero_point, dtype="float32")
grad_s = mge.tensor(grad_s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
        ) # gradient with |data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = fake_quant_tensor(x, qparams).numpy()
assert np.isnan(y).all()
zero_point = tensor([1.0], dtype=np.float32)
scale = tensor([4.0], dtype=np.float32)
run(zero_point, scale)
zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
run(zero_point, scale)
class LSQ_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale, zero_point, grad_scale):
inp_scaled = inp / scale + zero_point
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.floor(inp_clipped + 0.5)
inp_flq = (inp_rounded - zero_point) * scale
self.saved_tensors = (inp_scaled, inp_rounded, scale, grad_scale)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, scale, grad_scale) = self.saved_tensors
ind_small = inp_scaled < self.lowerbound
ind_big = inp_scaled > self.upperbound
ind_middle = np.logical_xor(ind_small, ind_big)
ind_middle = np.abs(ind_middle - 1)
grad_s = (
ind_small * self.lowerbound
+ ind_big * self.upperbound
+ ind_middle * (-inp_scaled + inp_rounded)
)
grad_s = grad_s * grad_scale * grad_inp_flq
grad_s = grad_s.sum()
grad_inp = grad_inp_flq * ind_middle
return grad_inp, grad_s
def test_lsq():
g = []
def cb(grad):
g.append(grad)
# FIXME: use random number when LSQ is fixed
# x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
# s = np.random.rand(1)
x = np.array(
[
[
[
[4.0, 38.0, -121.0, 38.0],
[15.0, -115.0, -112.0, 24.0],
[23.0, -65.0, 109.0, -115.0],
],
[
[-66.0, -90.0, -45.0, -101.0],
[68.0, -98.0, 108.0, -79.0],
[54.0, 63.0, -10.0, -50.0],
],
]
],
dtype="float32",
)
s = np.array([0.02918224], dtype="float32")
eps = np.array([1e-5], dtype="float32")
s = np.abs(s) if np.abs(s) > eps else eps
zero_point = np.array([1.0], dtype="float32")
grad_s = np.array([2.0], dtype="float32")
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = LSQ_numpy(-127, 127)
y_np = n.forward(x, s, zero_point, grad_s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
zero_point = mge.tensor(zero_point, dtype="float32")
grad_s = mge.tensor(grad_s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = | lsq_forward(-127, 127, x, s, zero_point, grad_s) | megengine.quantization.utils.lsq_forward |
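For the scale gradient, LSQ_numpy.backward above splits q = inp / scale + zero_point into three regions: below lowerbound the per-element contribution is lowerbound, above upperbound it is upperbound, and in between it is floor(q + 0.5) - q; each contribution is then weighted by grad_scale and the incoming gradient before being summed. A small numpy illustration of the three branches, with hand-picked values:
import numpy as np
lower, upper = -127, 127
q = np.array([-200.0, 3.4, 200.0])
rounded = np.floor(q + 0.5)
per_elem = np.where(q < lower, lower, np.where(q > upper, upper, rounded - q))
# per_elem -> [-127., -0.4, 127.] (up to float rounding)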
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with | Grad() | megengine.core.autodiff.grad.Grad |
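For readers unfamiliar with the Grad API targeted by this row, a minimal self-contained sketch of the same wrt/callback pattern used throughout the file (an illustration, not a dataset row):

import numpy as np
import megengine as mge
import megengine.functional as F
from megengine.core.autodiff.grad import Grad

x = mge.tensor(np.ones((2, 2), dtype="float32"))
with Grad() as grad:
    grad.wrt(x, callback=lambda g: print(g.numpy()))  # callback receives the gradient w.r.t. x
    y = x * 2
    grad(y, mge.tensor(F.ones_like(y)))  # backprop ones; the callback prints all 2s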
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = | fake_quant_tensor(x, qparams) | megengine.quantization.utils.fake_quant_tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert | make_shape_tuple(x.grad.shape) | megengine.core.tensor.utils.make_shape_tuple |
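make_shape_tuple, the API targeted by this row, appears to normalize a tensor shape into a plain Python tuple so the two gradient shapes can be compared with ==; a small hedged example (that it returns a tuple of ints is an assumption inferred from its use above):

import numpy as np
import megengine as mge
from megengine.core.tensor.utils import make_shape_tuple

t = mge.tensor(np.zeros((1, 32, 32, 32), dtype="float32"))
assert make_shape_tuple(t.shape) == (1, 32, 32, 32)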
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == | make_shape_tuple(x1.grad.shape) | megengine.core.tensor.utils.make_shape_tuple |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = | fake_quant_tensor(inp, qparams) | megengine.quantization.utils.fake_quant_tensor |
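A worked numeric check of the ground-truth formula in fake_quant_tensor_gt (illustration only), using the per-tensor qparams that appear in this file: scale=4.0, zero_point=1.0, qmin=-126, qmax=129.

import numpy as np

scale, zero_point, qmin, qmax = 4.0, 1.0, -126, 129
x = np.array([9.0, -600.0])
q = np.clip(np.round(x / scale) + zero_point, qmin, qmax)  # [3., -126.]
print((q - zero_point) * scale)                            # [8., -508.]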
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor(F.ones_like(x1)))
assert np.allclose(x.grad.numpy(), x1.grad.numpy())
assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
# test nan
x = F.full((1, 32, 3, 3), np.nan)
y = | fake_quant_tensor(x, qparams) | megengine.quantization.utils.fake_quant_tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor( | F.ones_like(x) | megengine.functional.ones_like |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.dtype import QuantDtypeMeta
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import (
QuantMode,
create_qparams,
fake_quant_tensor,
lsq_forward,
tqt_forward,
)
class TQT_numpy:
def __init__(self, lowerbound, upperbound):
super().__init__()
self.lowerbound = lowerbound
self.upperbound = upperbound
def forward(self, inp, scale):
t = 2 ** scale
# t = F.maximum(t, 1e-4)
inp_scaled = inp / t
inp_clipped = np.maximum(
np.minimum(inp_scaled, self.upperbound), self.lowerbound
)
inp_rounded = np.round(inp_clipped)
inp_flq = inp_rounded * t
self.saved_tensors = (inp_scaled, inp_rounded, t)
return inp_flq
def backward(self, grad_inp_flq):
(inp_scaled, inp_rounded, t) = self.saved_tensors
mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
inp_scaled > self.upperbound + 0.5
) # mask for accumulating the gradients of |data_scaled|>L
mask_quant = np.abs(
mask_clip - 1
) # mask for accumulating the gradients with |data_scaled|<=L
grad_quant = (
grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
) # gradient within |data_scaled|<=L
grad_clip = (
grad_inp_flq * mask_clip * inp_rounded
) # gradient with | data_scaled|>L
grad_s = grad_clip.sum() + grad_quant.sum()
# dL/ds = dL/dt * t * ln(2)
grad_s = grad_s * t * np.log(2)
grad_inp = grad_inp_flq * mask_quant
return grad_inp, grad_s
def test_tqt():
g = []
def cb(grad):
g.append(grad)
x = np.random.randint(-128, 128, size=(1, 2, 3, 4)).astype("float32")
s = np.random.rand(1) - 1
g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")
n = TQT_numpy(-127, 127)
y_np = n.forward(x, s)
g_x_np, g_s_np = n.backward(g_y)
x = mge.tensor(x, dtype="float32")
s = mge.tensor(s, dtype="float32")
g_y = mge.tensor(g_y, dtype="float32")
with Grad() as grad:
grad.wrt(x, s, callback=cb)
y = tqt_forward(-127, 127, x, s)
grad(y, g_y)
g_x, g_s = g
np.testing.assert_allclose(y.numpy(), y_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_x.numpy(), g_x_np, rtol=1e-5, atol=1e-5)
np.testing.assert_allclose(g_s.numpy(), g_s_np, rtol=5e-5, atol=5e-5)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
def forward(self, x):
return F.round(x)
def backward(self, output_grads):
return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
oup = Round()(inp / scale) + zero_point
oup = F.minimum(F.maximum(oup, qmin), qmax)
oup = (oup - zero_point) * scale
return oup
def test_fakequant():
qmin = -126
qmax = 129
test_dtype = QuantDtypeMeta("test_qint8", None, "int8", qmin, qmax)
def run(zero_point, scale):
qparams = create_qparams(QuantMode.ASYMMERTIC, test_dtype, scale, zero_point)
inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
inp = tensor(inp_data, dtype=np.float32)
# test forward
oup = fake_quant_tensor(inp, qparams).numpy()
oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
assert np.allclose(oup, oup_gt)
assert oup.shape == oup_gt.shape
# test backward
x = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x, callback=_save_to(x))
y = fake_quant_tensor(x, qparams)
grad(y, tensor(F.ones_like(x)))
x1 = tensor(inp_data, dtype=np.float32)
with Grad() as grad:
grad.wrt(x1, callback=_save_to(x1))
y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
grad(y1, tensor( | F.ones_like(x1) | megengine.functional.ones_like |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Tuple, Union
import numpy as np
import megengine._internal as mgb
from ... import module as Float
from ...core import Parameter
from ...functional import conv_bias_activation
from ..qat import conv as QAT
from .module import QuantizedModule
class Conv2d(Float.Conv2d, QuantizedModule):
r"""quantized version of :class:`~.qat.conv.Conv2d`."""
r"""Applies a 2D convolution over an quantized input tensor, inference only.
The parameter is same with :class: `~.Conv2d`
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode: str = "CROSS_CORRELATION",
compute_mode: str = "DEFAULT",
dtype=None,
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
True,
conv_mode,
compute_mode,
)
self.output_dtype = dtype
def calc_conv_quantized(self, inp, nonlinear_mode="IDENTITY"):
inp_scale = | mgb.dtype.get_scale(inp.dtype) | megengine._internal.dtype.get_scale |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Tuple, Union
import numpy as np
import megengine._internal as mgb
from ... import module as Float
from ...core import Parameter
from ...functional import conv_bias_activation
from ..qat import conv as QAT
from .module import QuantizedModule
class Conv2d(Float.Conv2d, QuantizedModule):
r"""quantized version of :class:`~.qat.conv.Conv2d`."""
r"""Applies a 2D convolution over an quantized input tensor, inference only.
The parameter is same with :class: `~.Conv2d`
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode: str = "CROSS_CORRELATION",
compute_mode: str = "DEFAULT",
dtype=None,
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
True,
conv_mode,
compute_mode,
)
self.output_dtype = dtype
def calc_conv_quantized(self, inp, nonlinear_mode="IDENTITY"):
inp_scale = mgb.dtype.get_scale(inp.dtype)
w_scale = | mgb.dtype.get_scale(self.weight.dtype) | megengine._internal.dtype.get_scale |
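How the two scales read above are combined is not shown in these rows; as a hedged sketch (an assumption based on standard int8 quantization arithmetic, not on the quoted source), the accumulator/bias scale would be their product:

inp_scale, w_scale = 0.5, 0.25      # hypothetical values for illustration
bias_scale = inp_scale * w_scale    # int32 accumulator scale = input scale * weight scale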