import torch._C._lazy
def render_ir_graph(tensors):
"""Return a text dump of the LTC IR graph in dot format for the tensors.
The text can be processed by tools like dot to be rendered in pdf,png etc."""
return torch._C._lazy._get_tensors_dot(tensors)
def dump_ir(tensors, ir_format):
"""Return a dump of the tensors in the specified format.
Valid format are
- text: for LTC IR
- backend: for the activate backend IR
"""
if ir_format == "text":
return torch._C._lazy._get_tensors_text(tensors)
elif ir_format == "backend":
return torch._C._lazy._get_tensors_backend(tensors)
else:
raise RuntimeError(f"Unrecognized IR format: {ir_format}")
# pytorch-master | torch/_lazy/debug.py
import copy
import dataclasses
import itertools
import os
from typing import Any, Callable, Dict, List
import torch
import torch._lazy as lazy
import torch._lazy.metrics as metrics
from torch import fx
from torch._lazy import computation, debug as lazy_debug
from torch._lazy.tensor_factory_functions import tensor_factory_functions
debug = os.environ.get("debug_extract_compiled_graph") is not None
@dataclasses.dataclass
class GraphInputMatcher:
"""
The GraphInputMatcher class sets up the graph inputs for future calls after lazy tracing.
Specifically, those graph inputs corresponding to method parameters should be replaced with the
arguments for the current call.
tensor_id_to_arg_idx maps the tensor id to the parameter index.
graph_input_tensor_ids, graph_input_ivalues list the tensor_id and ivalue for each of the
TS/XLA graph inputs.
"""
tensor_id_to_arg_idx: Dict[int, int]
graph_input_tensor_ids: List[int]
# there are 2 categories of graph_input_tensors.
# Category 1: those whose ids are not found in tensor_id_to_arg_idx. These are
# most likely const tensors and we can get their content from graph_input_tensors.
# Category 2: those whose ids are found in tensor_id_to_arg_idx. We should get
# the tensor from the method arguments.
graph_input_ivalues: List[Any]
# get the real graph input tensors
def __call__(self, args):
real_input = []
for tensor_id, traced_ivalue in zip(
self.graph_input_tensor_ids, self.graph_input_ivalues
):
arg_idx = self.tensor_id_to_arg_idx.get(tensor_id, None)
if arg_idx is None:
inp = traced_ivalue
else:
inp = args[arg_idx]
real_input.append(inp)
return real_input
class ReturnValueHandler:
r"""
When ltc_sync_multi is called on multiple tensors, the compiled graph
will contain output only for unique tensors - if a tensor appears multiple
times in the input to _ltc_sync_multi, only the first occurrence matters.
However, from the Python level we still expect the tensors to be returned with duplication
even if the TS graph dedups the output. E.g. for the method:
def forward(self, a):
return a, a
the TS graph captured by LTC will return a single tensor, but the Python method expects 2.
This class dedups the lazy tensors first to get the indices that will be used
to duplicate the eager tensors later.
"""
def __init__(self, lazy_out_list):
self.index: List[List[int]] = []
self.total_count = len(lazy_out_list)
tensor_id_to_idx: Dict[int, int] = dict()
for dup_idx, lazy_tensor in enumerate(lazy_out_list):
uniq_idx = tensor_id_to_idx.get(id(lazy_tensor), None)
if uniq_idx is not None:
self.index[uniq_idx].append(dup_idx)
else:
uniq_idx = len(self.index)
self.index.append([dup_idx])
tensor_id_to_idx[id(lazy_tensor)] = uniq_idx
def duplicate_eager_tensors(self, eager_tensor_list):
duplicated_list = [None] * self.total_count
assert len(eager_tensor_list) == len(self.index)
for uniq_idx, eager_tensor in enumerate(eager_tensor_list):
for dup_idx in self.index[uniq_idx]:
duplicated_list[dup_idx] = eager_tensor
return duplicated_list
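# Illustrative behaviour (editor's hedged sketch, not part of the original file;
# assumes the lazy TS backend is initialized): for a method returning the same lazy
# tensor twice, the handler records the duplication pattern and later re-expands
# the deduplicated eager outputs.
# >>> a = torch.tensor([1.0]).to(device="lazy")
# >>> handler = ReturnValueHandler([a, a])   # handler.index == [[0, 1]]
# >>> handler.duplicate_eager_tensors([torch.tensor([2.0])])
# [tensor([2.]), tensor([2.])]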
def force_lazy_device(model: fx.GraphModule):
"""
Factory methods in an Fx graph may create tensors on a specific eager device.
If we take no action, those eager tensors will be mixed with lazy tensors and
cause a crash. This method overwrites those eager devices with the lazy device.
"""
def tolazydevice(dev):
if isinstance(dev, torch.device):
return torch.device("lazy", index=dev.index)
return dev
def hasDeviceArg(args, kwargs):
return any(
isinstance(arg, torch.device)
for arg in itertools.chain(args, kwargs.values())
)
for nd in model.graph.nodes:
nd.args = tuple(tolazydevice(arg) for arg in nd.args)
nd.kwargs = {k: tolazydevice(v) for k, v in nd.kwargs.items()}
# For torchbench models like yolov3 and hf_Bart, dynamo generates an Fx graph that returns
# eager tensors on the default device
# (check https://gist.github.com/shunting314/eabdf6c769c59bc384469717b8f9bb7f for yolov3,
# and https://gist.github.com/shunting314/8d5e2d9348a3258959d3954186c48814 for hf_Bart).
# To force those tensors onto the lazy device, we cannot simply override
# the device argument since there is no explicit device argument.
# What we do here is, for the list of covered tensor factory methods,
# add a lazy device argument explicitly.
#
# TODO: This solution is not ideal since we may miss some factory methods. In the future,
# when we support lazy mode, this method can be replaced by that.
if nd.target in tensor_factory_functions and not hasDeviceArg(
nd.args, nd.kwargs
):
kwargs = dict(nd.kwargs) # nd.kwargs is immutable. make a mutable copy.
kwargs["device"] = torch.device("lazy")
nd.kwargs = kwargs
model.recompile()
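# Illustrative effect (editor's hedged sketch, not part of the original file):
# a factory call without a device argument should pick up an explicit lazy device.
# >>> def f(x):
# ...     return x + torch.ones(2)
# >>> gm = torch.fx.symbolic_trace(f)
# >>> force_lazy_device(gm)
# >>> # the torch.ones node now carries kwargs like {'device': device(type='lazy')}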
def get_fallback_ops():
fallback_ops = []
for opname in metrics.counter_names():
if "aten::" not in opname:
continue
val = int(metrics.counter_value(opname))
if val > 0:
fallback_ops.append(f"{opname}={val}")
return fallback_ops
def extract_compiled_graph(model: fx.GraphModule, example_inputs) -> Callable:
"""
Optimize an eager model with LTC and return a wrapper to execute the
compiled graph directly without retracing. It depends on other mechanisms
like TorchDynamo guards to guarantee the returned wrapper is only called
when it's safe.
"""
lazy_args = [arg.to(device="lazy") for arg in example_inputs]
args_tensor_ids = [lazy.get_tensor_id(lazy_arg) for lazy_arg in lazy_args]
tensor_id_to_arg_idx = {tensor_id: i for i, tensor_id in enumerate(args_tensor_ids)}
lazy_model = copy.deepcopy(model).to(device=torch.device("lazy"))
force_lazy_device(lazy_model)
# This line executes lazy tracing and enables us to extract the compiled graph later
metrics.reset()
lazy_out = lazy_model(*lazy_args)
fallback_ops = get_fallback_ops()
metrics.reset()
if len(fallback_ops) > 0:
raise RuntimeError(
f"Fail to extact the compiled graph because of fallback: {','.join(fallback_ops)}"
)
if not isinstance(lazy_out, (tuple, list)):
lazy_out = (lazy_out,)
args_and_out = tuple(lazy_args) + tuple(lazy_out)
return_value_handler = ReturnValueHandler(args_and_out)
if debug:
print("Fx code:\n", model.code)
print("LTC IR:", lazy_debug.dump_ir(args_and_out, "text"))
# TODO: this part is TS backend specific for now and will be generalized to
# support XLA
(
graph_input_tensor_ids,
graph_input_ivalues,
) = computation.get_tensors_ts_device_data_node(args_and_out)
assert len(graph_input_tensor_ids) == len(graph_input_ivalues)
graph_input_matcher = GraphInputMatcher(
tensor_id_to_arg_idx, graph_input_tensor_ids, graph_input_ivalues
)
graph_hash = computation.get_graph_hash(args_and_out)
if debug:
print("graph_hash", graph_hash)
print(f"args_tensor_ids {args_tensor_ids}")
print("tensor ids from device data:", graph_input_tensor_ids)
# sync the list of output tensors so the computation graph for these
# tensors will be cached. Those computation graphs can be retrieved
# by graph hash later.
lazy.sync_multi(args_and_out, [])
def optimized_mod(*args):
if len(args_and_out) == 0:
return ()
graph_input = graph_input_matcher(args)
res = return_value_handler.duplicate_eager_tensors(
computation.run_cached_graph(graph_hash, graph_input)
)
assert len(res) == len(args_and_out)
for i, arg in enumerate(args):
# only copy those tensors that get inplace updated
if arg is not res[i]:
arg.copy_(res[i])
# skip the args
return res[len(args) :]
return optimized_mod
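# Illustrative usage (editor's hedged sketch, not part of the original file).
# Assumes the lazy TorchScript backend is available and initialized.
# >>> import torch
# >>> import torch._lazy.ts_backend
# >>> torch._lazy.ts_backend.init()
# >>> def f(a, b):
# ...     return a + b, a
# >>> gm = torch.fx.symbolic_trace(f)
# >>> opt = extract_compiled_graph(gm, [torch.randn(2), torch.randn(2)])
# >>> out = opt(torch.randn(2), torch.randn(2))  # runs the cached LTC graph, no retracing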
# pytorch-master | torch/_lazy/extract_compiled_graph.py
import torch._C._lazy
def dump(dot_file_name: str):
"""Dump TrieCache in the dot format"""
return torch._C._lazy._dump_ir_cache(dot_file_name)
def reset():
"""Clear TrieCache. This is needed in testing to avoid
node reuse between different tests.
"""
return torch._C._lazy._clear_ir_cache()
# pytorch-master | torch/_lazy/ir_cache.py
import torch._C._lazy_ts_backend
def init():
"""Initializes the lazy Torchscript backend"""
torch._C._lazy_ts_backend._init()
# pytorch-master | torch/_lazy/ts_backend.py
import torch._C._lazy
import torch._C._lazy_ts_backend
def get_tensors_ts_device_data_node(tensors):
"""Return tensor ids and eager tensors for DeviceData nodes in the
IR for the passed in lazy tensors.
TODO: This API is currently ts backend specific. We are working on
generalizing it to all backends including XLA.
"""
return torch._C._lazy_ts_backend._get_tensors_ts_device_data_node(tensors)
def get_graph_hash(tensors):
"""Return the graph hash for the passed in lazy tensors"""
return torch._C._lazy._get_graph_hash(tensors)
def run_cached_graph(hash_str, graph_inputs):
"""Running the cached computation graph with the given inputs
TODO: This API is currently ts backend specific. We are working on
generalizing it to all backends including XLA.
"""
return torch._C._lazy_ts_backend._run_cached_graph(hash_str, graph_inputs)
# pytorch-master | torch/_lazy/computation.py
# -*- coding: utf-8 -*-
import warnings
# A workaround to support both TorchScript and MyPy:
from typing import Any, List, Optional, Tuple, TYPE_CHECKING, Union
import torch
from torch import Tensor
from . import _docs
if TYPE_CHECKING:
from torch.types import _dtype as DType
DimOrDims = Optional[Union[int, Tuple[int], List[int]]]
else:
# The JIT doesn't understand Union, nor torch.dtype here
DType = int
DimOrDims = Optional[Tuple[int]]
__all__ = []
# All masked reduction/normalization operations have the same
# signatures. Here we introduce docstring templates that are applied
# to docstrings of reduction/normalization functions via
# _apply_docstring_templates decorator.
def _apply_docstring_templates(func):
"""Decorator that applies docstring templates to function docstring
and returns the function instance.
"""
doc_string = getattr(_docs, f"{func.__name__}_docstring", None)
if doc_string is None:
warnings.warn(
f"No documentation string available for {func.__name__}."
" PyTorch team should run `python tools/update_masked_docs.py`"
" to generate the missing docstrings."
)
else:
func.__doc__ = doc_string
# Expose function as public symbol
__all__.append(func.__name__)
return func
def _generate_docstring(func):
"""An utility function called from tools/update_masked_docs.py
script to update the module torch._masked._docs.py
"""
docstring_templates = dict(
reduction_signature="""\
{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""",
reduction_descr="""\
Returns {operation name} of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.""",
reduction_args="""\
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in {operation name} computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of {operation name} operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than that of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
{args_declarations}
Keyword args:
{kwargs_declarations}""",
reduction_example="""\
Example::
>>> input = {example_input}
>>> input
{indent_example_input}
>>> mask = {example_mask}
>>> mask
{indent_example_mask}
>>> {full_function_name}(input, {example_args}, mask=mask)
{indent_example_output}
""",
reduction_identity="""\
The identity value of {operation name} operation, which is used to start the reduction, is ``{identity_int32}``.""",
reduction_identity_dtype="""\
The identity value of {operation name} operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``{identity_float32}``, ``{identity_uint8}``, and ``{identity_int32}``, respectively.""",
normalization_signature="""\
{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""",
normalization_descr="""\
Returns {operation name} of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
{definition}""",
normalization_args="""\
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
{operation name} computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the {operation name} output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than that of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
{args_declarations}
Keyword args:
{kwargs_declarations}""",
normalization_example="""\
Example::
>>> input = {example_input}
>>> input
{indent_example_input}
>>> mask = {example_mask}
>>> mask
{indent_example_mask}
>>> {full_function_name}(input, {example_args}, mask=mask)
{indent_example_output}
""",
)
args_and_kwargs = dict(
# argument name suffixes separated by a double underscore will
# be removed in the final documentation string.
sum=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
prod=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
cumsum=(("dim__as_int",), ("dtype=None", "mask=None")),
cumprod=(("dim__as_int",), ("dtype=None", "mask=None")),
amin=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
amax=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
argmin=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
argmax=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
mean=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
median=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
norm=(
(
"ord",
"dim",
),
("keepdim=False", "dtype=None", "mask=None"),
),
var=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")),
std=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")),
logsumexp=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
softmax=(("dim__as_int",), ("dtype=None", "mask=None")),
log_softmax=(("dim__as_int",), ("dtype=None", "mask=None")),
softmin=(("dim__as_int",), ("dtype=None", "mask=None")),
normalize=(
(
"ord__required",
"dim__as_int",
),
("eps=1e-12", "dtype=None", "mask=None"),
),
)
argument_declarations = dict(
dim="""\
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None, which is equivalent to ``tuple(range(input.ndim))``.""",
dim__as_int="""\
dim (int): the dimension along which {operation name} is computed.""",
ord="""\
ord (int, float, optional): the order of vector norm. Default: 2.
See :func:`torch.linalg.vector_norm` for a list of supported norms.""",
ord__required="""\
ord (int, float): the order of vector norm. Default: 2.
See :func:`torch.linalg.vector_norm` for a list of supported norms.""",
unbiased="""\
unbiased (bool): when True, use Bessel’s correction, otherwise, compute
the uncorrected sample variance.""",
eps="""\
eps (float, optional): small value to avoid division by zero. Default: {default}.""",
keepdim="""\
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: {default}.""",
dtype="""\
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: {default}.""",
mask="""\
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None, which is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.""",
)
definitions = dict(
softmax="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Softmax of i-th element in ``x`` is
defined as ``exp(x[i])/sum(exp(x))``.""",
log_softmax="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. LogSoftmax of i-th element in ``x`` is
defined as ``log(exp(x[i])/sum(exp(x)))``.""",
softmin="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Softmin of i-th element in ``x`` is
defined as ``exp(-x[i])/sum(exp(-x))``.""",
normalize="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Normalize of i-th element in ``x`` is
defined as ``x[i]/max(norm(x, p), eps)``.""",
cumsum="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is
defined as ``sum(x[:i])``.""",
cumprod="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Cumprod of i-th element in ``x`` is
defined as ``prod(x[:i])``.""",
)
reduction_names = dict(
sum="sum",
prod="product",
amax="maximum",
amin="minimum",
argmax="argmax",
argmin="argmin",
mean="mean",
median="median",
norm="norm",
var="variance",
std="standard_deviation",
logsumexp="logsumexp",
)
normalization_names = dict(
softmax="softmax",
log_softmax="log_softmax",
softmin="softmin",
normalize="normalize",
cumsum="cumulative_sum",
cumprod="cumulative_prod",
)
operation_names = dict()
operation_names.update(reduction_names)
operation_names.update(normalization_names)
# Default example data:
example_dim = 1
example_input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
example_mask = torch.tensor([[True, False, True], [False, False, False]])
example_args: Tuple[Any, ...]
if func.__name__ in {"norm", "normalize"}:
example_args = (2.0, example_dim)
example_input = example_input.to(dtype=torch.float32)
elif func.__name__ in {"var", "std"}:
example_args = (example_dim, False)
elif func.__name__ == "median":
example_args = (example_dim,)
example_input = example_input.to(dtype=torch.float32)
else:
example_args = (example_dim,)
operation_args: Tuple[str, ...]
operation_kwargs: Tuple[str, ...]
operation_args, operation_kwargs = args_and_kwargs[func.__name__]
arg_declarations = [
"\n ".join(
argument_declarations.get(a, f'{a.split("__", 1)[0]}: TBD.').splitlines()
)
for a in operation_args
]
kwarg_declarations = [
"\n ".join(
argument_declarations.get(
a.split("=", 1)[0], f'{a.split("__", 1)[0]}: TBD.'
)
.format(default=a.split("=", 1)[1])
.splitlines()
)
for a in operation_kwargs
]
if func.__name__ in reduction_names:
op_kind = "reduction"
doc_sections = ["signature", "descr", "identity", "args", "example"]
elif func.__name__ in normalization_names:
op_kind = "normalization"
doc_sections = ["signature", "descr", "args", "example"]
example_input = example_input.to(dtype=torch.float32)
else:
assert 0 # add function name to operation names dictionaries
example_output = func(example_input, *example_args, mask=example_mask)
template_data = {
"function_name": func.__name__,
"full_function_name": func.__module__ + "." + func.__name__,
"operation name": operation_names[func.__name__],
"operation_args": ", ".join(a.split("__", 1)[0] for a in operation_args),
"operation_kwargs": ", ".join(a.split("__", 1)[0] for a in operation_kwargs),
# one-line representation of a tensor:
"example_input": " ".join(str(example_input).split()),
"example_args": ", ".join(map(str, example_args)),
"example_mask": " ".join(str(example_mask).split()),
# multi-line representation of a tensor with indent
"indent_example_input": ("\n ").join(str(example_input).splitlines()),
"indent_example_mask": ("\n ").join(str(example_mask).splitlines()),
"indent_example_output": ("\n ").join(str(example_output).splitlines()),
}
if func.__name__ in reduction_names:
template_data.update(
identity_uint8=_reduction_identity(
func.__name__, torch.tensor(0, dtype=torch.uint8)
),
identity_int32=_reduction_identity(
func.__name__, torch.tensor(0, dtype=torch.int32)
),
identity_float32=_reduction_identity(
func.__name__, torch.tensor(0, dtype=torch.float32)
),
)
if func.__name__ == "norm":
template_data.update(
identity_ord_ninf=_reduction_identity(
func.__name__, torch.tensor(0, dtype=torch.float32), float("-inf")
)
)
elif func.__name__ in normalization_names:
template_data.update(definition=definitions[func.__name__])
else:
assert 0 # add function name to operation names dictionaries
template_data.update(
args_declarations=("\n ".join(arg_declarations)).format_map(template_data)
)
template_data.update(
kwargs_declarations=("\n ".join(kwarg_declarations)).format_map(
template_data
)
)
# Apply function name info to docstring templates:
templates = dict(
(k, v.format_map(template_data))
for k, v in docstring_templates.items()
if k.startswith(op_kind)
)
templates.update(
(k, v.format_map(template_data) if isinstance(v, str) else v)
for k, v in template_data.items()
)
# Apply docstring templates to the function docstring:
if func.__doc__ is None:
doc_template = "\n\n".join([f"{{{op_kind}_{sec}}}" for sec in doc_sections])
else:
doc_template = func.__doc__
return doc_template.format_map(templates)
def _reduction_identity(op_name: str, input: Tensor, *args):
"""Return identity value as scalar tensor of a reduction operation on
given input, or None, if the identity value cannot be uniquely
defined for the given input.
The identity value of the operation is defined as the initial
value to reduction operation that has a property ``op(op_identity,
value) == value`` for any value in the domain of the operation.
Or put it another way, including or exlucing the identity value in
a list of operands will not change the reduction result.
See https://github.com/pytorch/rfcs/pull/27 for more information.
"""
dtype: DType = input.dtype
device = input.device
op_name = op_name.rsplit(".", 1)[-1] # lstrip module name when present
if op_name in {"sum", "cumsum"}:
return torch.tensor(0, dtype=dtype, device=device)
elif op_name in {"prod", "cumprod"}:
return torch.tensor(1, dtype=dtype, device=device)
elif op_name in {"amax", "argmax", "logsumexp"}:
if torch.is_floating_point(input):
return torch.tensor(-torch.inf, dtype=dtype, device=device)
elif torch.is_signed(input) or dtype == torch.uint8:
return torch.tensor(torch.iinfo(dtype).min, dtype=dtype, device=device)
elif op_name in {"amin", "argmin"}:
if torch.is_floating_point(input):
return torch.tensor(torch.inf, dtype=dtype, device=device)
elif torch.is_signed(input) or dtype == torch.uint8:
return torch.tensor(torch.iinfo(dtype).max, dtype=dtype, device=device)
elif op_name == "mean":
# Strictly speaking, the identity value of the mean operation
# is the mean of the input. Since the mean value depends on
# the dim argument and it may be a non-scalar tensor, we
# consider the identity value of the mean operation ambiguous.
# Moreover, the mean value of empty input is undefined.
return None
elif op_name == "norm":
ord = args[0] if args else 2
if ord == float("-inf"):
assert torch.is_floating_point(input), input.dtype
return torch.tensor(torch.inf, dtype=dtype, device=device)
return torch.tensor(0, dtype=dtype, device=device)
elif op_name == "median":
# We use NaN for now because the implementation is currently using torch.nanmedian
# and NaN is the identity for that function since it gets ignored
dtype = input.dtype if torch.is_floating_point(input) else torch.float
return torch.tensor(torch.nan, dtype=dtype, device=device)
elif op_name in {"var", "std"}:
return None
raise NotImplementedError(f"identity of {op_name} on {dtype} input")
def _canonical_dim(dim: DimOrDims, ndim: int) -> Tuple[int, ...]:
"""Return dim argument as a tuple of sorted dim values."""
dims: List[int] = []
if dim == ():
# Currently, `dim=()` in reductions operations means "reduce
# over all dimensions" while in future, it will read "no
# reduce". See https://github.com/pytorch/pytorch/issues/29137
# When gh-29137 is resolved, this if-block must be deleted.
dim = None
if dim is None:
return tuple(range(ndim))
ndim = max(ndim, 1)
dim_ = (dim,) if isinstance(dim, int) else dim
for d in dim_:
if d in dims:
raise RuntimeError(f"dim={d} appears multiple times in the list of dims")
if d >= ndim or d < -ndim:
raise IndexError(
f"Dimension out of range (expected to be in range of [{-ndim}, {ndim-1}], but got {d})"
)
dims.append(d % ndim)
return tuple(sorted(dims))
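# Illustrative behaviour (editor's hedged sketch, not part of the original file):
# negative dims are wrapped, duplicates rejected, and the result is sorted.
# >>> _canonical_dim(None, ndim=2)
# (0, 1)
# >>> _canonical_dim(-1, ndim=3)
# (2,)
# >>> _canonical_dim((2, 0), ndim=3)
# (0, 2)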
def _sparse_coo_flatten_indices(indices: Tensor, shape: tuple):
# Flatten N-D indices to 1-D indices
flat_indices = indices.new_zeros(indices.size(1))
for d, sz in enumerate(shape):
flat_indices.mul_(sz)
flat_indices.add_(indices[d])
return flat_indices
def _any(input: Tensor, dim: tuple, keepdim: bool):
# Support torch.any with tuple dim argument.
# Workaround of https://github.com/pytorch/pytorch/issues/56586
r = input
for d in reversed(dim):
r = r.any(dim=d, keepdim=keepdim)
return r
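# Illustrative behaviour (editor's hedged sketch, not part of the original file):
# torch.any is applied one dimension at a time to emulate a tuple dim argument.
# >>> m = torch.tensor([[True, False], [False, False]])
# >>> _any(m, (0, 1), False)
# tensor(True)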
def _sparse_coo_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
"""Sparse variant of torch.where. Supports sparse COO and hybrid sparse COO tensors.
_sparse_coo_where implements the following invariant:
_sparse_coo_where(mask, input, fill_value).to_dense(fill_value) ==
torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value))
where `a == b` means `assertEqual(a, b)`, mask is a boolean sparse
tensor, and `to_dense(fill_value)` is like `to_dense()` except
that the unspecified elements are mapped to `fill_value` rather
than to `0`.
Returns a sparse COO tensor with the following features:
- all specified elements correspond to masked-in elements that
have the values of the input tensor. If a masked-in element
(as specified by mask) is not specified in the input, the
corresponding element in the result tensor has value 0. In the
dense part of the sparse tensor, the masked-out elements are
replaced with fill_value.
- all unspecified elements correspond to masked-out elements.
"""
assert input.layout == torch.sparse_coo
assert mask.layout == input.layout
assert mask.shape == input.shape
assert mask.dense_dim() == input.dense_dim() # TODO: eliminate this restriction
input = input.coalesce()
# For set operations on sparse tensor indices, we'll convert
# multi-dimensional indices to 1-D indices for efficiency.
input_flat_indices = _sparse_coo_flatten_indices(
input.indices(), input.shape[: input.sparse_dim()]
)
mask_flat_indices = _sparse_coo_flatten_indices(
mask.indices(), mask.shape[: mask.sparse_dim()]
)
# the set of mask flat indices that define masked-in elements:
if mask.dense_dim() > 0:
mask_values = _any(
mask.values(), tuple(range(1, input.sparse_dim() + 1)), False
)
else:
mask_values = mask.values()
maskin_flat_indices = mask_flat_indices[mask_values.nonzero()[:, 0]]
def intersection(i1, i2):
union, counts = torch.cat([i1, i2]).unique(return_counts=True)
return union, torch.where(counts.gt(1))
def minus(i1, i2):
union, counts = torch.cat([i1, i2]).unique(return_counts=True)
return intersection(union[torch.where(counts.eq(1))], i1)
def _apply(a):
obj, w = a
return obj[w]
# the set of input flat indices of specified and masked-in elements:
maskin_input_flat_indices = _apply(
intersection(maskin_flat_indices, input_flat_indices)
)
_, w = intersection(input_flat_indices, maskin_input_flat_indices)
# the indices and values of masked-in elements
where_input_indices = input.indices()[(slice(None),) + w]
where_input_values = input.values()[w]
if mask.dense_dim() > 0:
# apply mask to the dense part of the input values:
_, w1 = intersection(mask_flat_indices, maskin_input_flat_indices)
where_mask_values = mask.values()[w1]
where_input_values = torch.where(
where_mask_values, where_input_values, fill_value
)
# the set of flat indices of unspecified input and masked-in elements:
maskin_zero_flat_indices = _apply(
minus(maskin_flat_indices, maskin_input_flat_indices)
)
# the indices of masked-in zero elements
_, w = intersection(mask_flat_indices, maskin_zero_flat_indices)
where_zero_indices = mask.indices()[(slice(None),) + w]
# construct result
n = where_zero_indices.size(1)
if n == 0:
# the input is coalesced, hence input_flat_indices are ordered
# and the result is guaranteed to be coalesced:
result = torch.sparse_coo_tensor(
where_input_indices, where_input_values, input.shape
)
return result._coalesced_(True)
where_indices = torch.cat([where_input_indices, where_zero_indices], dim=1)
where_values = torch.cat(
[
where_input_values,
where_input_values.new_zeros((n,) + where_input_values.shape[1:]),
]
)
result = torch.sparse_coo_tensor(where_indices, where_values, input.shape)
# appending zero elements leads to uncoalesced sparse tensor
return result.coalesce()
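# Illustrative check of the documented invariant (editor's hedged sketch, not part of the original file):
# >>> input = torch.tensor([[1.0, 0.0], [0.0, 4.0]]).to_sparse()
# >>> mask = torch.tensor([[True, False], [True, True]]).to_sparse()
# >>> res = _sparse_coo_where(mask, input, torch.tensor(-1.0))
# >>> res.to_dense()   # the masked-in but unspecified element (1, 0) becomes an explicit zero
# tensor([[1., 0.],
#         [0., 4.]])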
def _sparse_coo_scatter_reduction_helper(
op,
mask_input: Tensor,
dims: Tuple[int, ...],
keepdim: bool,
dtype: Optional[DType] = None,
) -> Tensor:
reduce = op.__name__
valid_reductions = ["sum", "prod", "amax", "amin"]
if reduce not in valid_reductions:
raise ValueError(
f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead"
)
output_dtype = dtype
values, indices = mask_input._values(), mask_input._indices()
input_dims = mask_input.dim()
num_sparse_dims = mask_input.sparse_dim()
reduced_sparse_dims = []
retained_sparse_dims = []
reduced_dense_dims = []
# promote dtype if specified
if values.dtype != output_dtype:
values = values.to(output_dtype)
if keepdim:
output_shape = tuple(
1 if i in dims else si for (i, si) in enumerate(mask_input.shape)
)
else:
output_shape = tuple(
si for (i, si) in enumerate(mask_input.shape) if i not in dims
)
for d in dims:
if d >= input_dims:
continue
if d < num_sparse_dims:
reduced_sparse_dims.append(d)
else:
reduced_dense_dims.append(d + 1 - num_sparse_dims)
# Reduce dense dimensions
if len(reduced_dense_dims) > 0:
if reduce == "sum":
new_values = values
new_values = op(new_values, dim=reduced_dense_dims, keepdim=bool(keepdim))
else:
# FIXME: Implement reductions for dense dimensions for ops with non-zero reduction identities
return NotImplemented
else:
new_values = values.clone()
# Reduce sparse dimensions
if len(reduced_sparse_dims) == num_sparse_dims:
if reduce in {"amax", "amin"} and new_values.size(0) == 0:
# IndexError: amax(): Expected reduction dim 0 to have non-zero size.
# sum()/prod() return the reduction identity when dim has size 0 but amax()/amin() do not
# See https://github.com/pytorch/pytorch/issues/61901
new_values = _reduction_identity(reduce, new_values)
else:
new_values = op(new_values, dim=0)
if keepdim:
for _ in range(num_sparse_dims):
new_values = new_values.unsqueeze(0)
return new_values.to(dtype=output_dtype).to_sparse()
else:
new_indices = indices.clone()
if keepdim:
# zero out reduced sparse dimensions if keepdim = True
# ensures that the call to torch.unique folds duplicated indices together while preserving the dimension
new_indices[reduced_sparse_dims, :] = 0
else:
# remove reduced sparse dimensions if keepdim = False
if len(reduced_sparse_dims) > 0:
retained_sparse_dims = [
i
for i in range(num_sparse_dims)
if i not in set(reduced_sparse_dims)
]
new_indices = new_indices.index_select(
0, torch.tensor(retained_sparse_dims).to(mask_input.device)
)
# Use scatter_reduce to reduce items in the new_values tensor that correspond to the same indices in new_indices
if new_indices.numel() > 0:
# lexsort indices and get index tensor for scatter reduction
new_indices, inverse_indices = torch.unique(
new_indices, return_inverse=True, dim=1
)
out_shape = list(new_values.shape)
out_shape[0] = new_indices.shape[1]
for _ in range(new_values.ndim - 1):
inverse_indices = inverse_indices.unsqueeze(-1)
scatter_indices = inverse_indices.expand(new_values.shape)
# FIXME: temporary workaround for an issue with bfloat16/float16; remove when acctype is implemented for scatter_reduce
if output_dtype in {torch.bfloat16, torch.float16}:
new_values = new_values.to(torch.float)
out = new_values.new_empty(out_shape)
new_values = out.scatter_reduce_(
0, scatter_indices, new_values, reduce=reduce, include_self=False
)
new_values = new_values.to(dtype=output_dtype)
else:
out = new_values.new_empty(out_shape)
new_values = out.scatter_reduce_(
0, scatter_indices, new_values, reduce=reduce, include_self=False
)
return torch.sparse_coo_tensor(
new_indices,
new_values,
output_shape,
dtype=output_dtype,
device=mask_input.device,
)
def _sparse_csr_segment_reduction_helper(
op,
mask_input: Tensor,
dims: Tuple[int, ...],
keepdim: bool,
dtype: Optional[DType] = None,
) -> Tensor:
# Currently, sparse CSR tensors are always 2D with no dense dimensions, so keepdim must be True.
# FIXME: revisit when dense dimensions are implemented for CSR tensors
assert (
keepdim
), "reduction operations on CSR tensors with keepdim=False is unsupported"
reduce = op.__name__
valid_reductions = ["sum", "prod", "mean", "amax", "amin"]
if reduce not in valid_reductions:
raise ValueError(
f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead"
)
device = mask_input.device
output_dtype = dtype
values, crow_indices, col_indices = (
mask_input.values(),
mask_input.crow_indices(),
mask_input.col_indices(),
)
# promote dtype if specified
if values.dtype != output_dtype:
values = values.to(output_dtype)
if len(dims) == 0:
return mask_input
if len(dims) == 1:
if dims[0] == 0:
new_col_indices, scatter_indices = torch.unique(
col_indices, return_inverse=True
)
new_nnz = new_col_indices.shape[0]
new_crow_indices = torch.tensor([0, new_nnz])
new_values = values.new_empty(new_col_indices.shape)
new_values.scatter_reduce_(
0, scatter_indices, values, reduce, include_self=False
)
new_shape = [1, mask_input.size(1)]
else:
assert (
dims[0] == 1
), "Sparse CSR tensors are 2D and only support reduction along dim 0 or 1."
# all intervals new_crow_indices[i] - new_crow_indices[i-1] are 1
# except for where crow_indices[i] == crow_indices[i-1] where the interval remains as 0
new_crow_indices = torch.cat(
(
crow_indices.new_zeros(1),
torch.cumsum(torch.diff(crow_indices) != 0, 0),
),
0,
)
new_nnz = new_crow_indices[-1]
new_col_indices = col_indices.new_zeros(new_nnz)
# segment_reduce takes 'max'/'min' rather than 'amax'/'amin', changing this would be BC-breaking
if reduce in ["amax", "amin"]:
reduce = reduce[1:]
new_values = torch.segment_reduce(values, reduce, offsets=crow_indices)
new_shape = [mask_input.size(0), 1]
else:
assert len(dims) == 2
nnz = min(1, values.numel())
if nnz == 1:
op_kwargs = {"keepdim": True, "dtype": output_dtype}
# amax and amin do not support dtype kwarg
if reduce in ["amax", "amin"]:
del op_kwargs["dtype"]
new_values = op(values, 0, **op_kwargs)
else:
new_values = torch.empty(0, dtype=output_dtype)
new_col_indices = col_indices.new_zeros(nnz)
new_crow_indices = torch.tensor([0, nnz])
new_shape = [1, nnz]
return torch.sparse_csr_tensor(
new_crow_indices,
new_col_indices,
new_values,
new_shape,
dtype=output_dtype,
device=device,
)
def _sparse_csr_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
"""Sparse variant of torch.where. Supports sparse CSR tensors."""
# TODO: implement sparse CSR specific where operator for efficiency
return _sparse_coo_where(
mask.to_sparse_coo(), input.to_sparse_coo(), fill_value
).to_sparse_csr()
def _where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
"""torch.where with sparse inputs support.
_where implements the following invariant:
_where(mask, input, fill_value).to_dense(fill_value) ==
torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value))
where `a == b` means `assertEqual(a, b)`, mask is a boolean sparse
tensor, and `to_dense(fill_value)` is like `to_dense()` except
that the unspecified elements are mapped to `fill_value` rather
than to `0`.
Returns a sparse tensor with the following features:
- all specified elements correspond to masked-in elements that
have the values of the input tensor. If a masked-in element
(as specified by mask) is not specified in the input, the
corresponding element in the result tensor has value 0. In the
dense part of the sparse tensor, the masked-out elements are
replaced with fill_value.
- all unspecified elements correspond to masked-out elements.
"""
if mask.layout == torch.strided:
return torch.where(mask, input, fill_value)
elif mask.layout == torch.sparse_coo:
return _sparse_coo_where(mask, input, fill_value)
elif mask.layout == torch.sparse_csr:
return _sparse_csr_where(mask, input, fill_value)
else:
raise ValueError(
f"_where expects strided or sparse COO or sparse CSR tensor but got {mask.layout}"
)
def _input_mask(input: Tensor, *args, **kwargs) -> Tensor:
"""Return canonical input mask.
A canonical input mask is defined as a boolean mask tensor whose
shape and layout match the shape and the layout of the
input.
The canonical input mask is computed from the :attr:`mask` tensor
content to meet the following criteria:
1. The shape of the canonical input mask is the same as the shape
of :attr:`input` tensor. If the mask tensor has a smaller shape
than the shape of the :attr:`input`, broadcasting rules will be
applied. Downcasting of mask is not supported.
2. The layout of the canonical input mask is the same as the
layout of the :attr:`input` tensor. If the mask has different
layout, it will be converted to the expected layout. In the
case of sparse COO layout, the canonical input mask will be
coalesced.
3. The dtype of the canonical input mask is torch.bool. If the
mask dtype is not bool then it will be converted to bool dtype
using `.to(dtype=bool)` method call.
4. The elements of the canonical input mask have boolean values
copied from the content of the :attr:`mask` tensor (after
possible broadcasting and dtype conversion transforms). In
general, the sparsity pattern of the sparse canonical input
mask need not be the same as the sparsity pattern of the
sparse :attr:`input` tensor.
"""
if input.layout not in {torch.strided, torch.sparse_coo, torch.sparse_csr}:
raise ValueError(
f"_input_mask expects strided or sparse COO or sparse CSR tensor but got {input.layout}"
)
mask = kwargs.get("mask")
# default mask
if mask is None:
raise ValueError("_input_mask requires explicit mask")
# mask shape must match with input shape
if mask.shape != input.shape:
if mask.ndim > input.ndim:
raise IndexError(
"_input_mask expected broadcastable mask (got mask dimensionality higher than of the input)"
)
if mask.layout == torch.strided:
mask = torch.broadcast_to(mask.clone(), input.shape).to(dtype=torch.bool)
elif mask.layout == torch.sparse_coo:
mask = torch._sparse_broadcast_to(mask, input.shape)
else:
assert mask.layout == torch.sparse_csr
# Broadcasting of CSR tensors is not implemented. Working
# around by using COO layout.
mask = torch._sparse_broadcast_to(
mask.to_sparse(), input.shape
).to_sparse_csr()
# mask layout must match with input layout
if mask.layout != input.layout:
if input.layout == torch.strided:
mask = mask.to_dense()
elif input.layout == torch.sparse_coo:
if mask.layout == torch.strided:
mask = mask.to_sparse(input.sparse_dim())
else:
mask = mask.to_sparse()
else:
assert input.layout == torch.sparse_csr
mask = mask.to_sparse_csr()
# sparse mask must be coalesced
if mask.layout == torch.sparse_coo:
mask = mask.coalesce()
# mask is a boolean tensor
mask = mask.to(dtype=torch.bool)
return mask
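# Illustrative behaviour (editor's hedged sketch, not part of the original file):
# a smaller non-boolean mask is broadcast to the input shape and cast to bool.
# >>> input = torch.zeros(2, 3)
# >>> _input_mask(input, mask=torch.tensor([1, 0, 1]))
# tensor([[ True, False,  True],
#         [ True, False,  True]])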
def _output_mask(op, input: Tensor, *args, **kwargs) -> Tensor:
"""Return output mask of masked operation applied to given arguments."""
if callable(op):
is_reduction = op.__name__ in {
"sum",
"prod",
"amax",
"amin",
"argmax",
"argmin",
"mean",
"median",
"norm",
"var",
"std",
"logsumexp",
}
is_normalization = op.__name__ in {
"softmax",
"log_softmax",
"softmin",
"normalize",
"cumsum",
"cumprod",
}
if is_reduction:
if op.__name__ == "norm":
if args:
args = args[1:] # lstrip ord argument
dim = args[0] if args else kwargs.get("dim")
outmask = _input_mask(input, *args, **kwargs)
keepdim = kwargs.get("keepdim", False)
dim_ = _canonical_dim(dim, input.ndim)
return _any(outmask, dim_, bool(keepdim))
elif is_normalization:
return _input_mask(input, *args, **kwargs)
else:
raise ValueError(
f"_output_mask expected masked operation (got callable {op.__module__}.{op.__name__})"
)
else:
raise ValueError(
f"_output_mask expected masked operation (got {type(op).__name__} object)"
)
def _combine_input_and_mask(op, input: Tensor, mask, *args) -> Tensor:
"""Return input with masked-out elements eliminated for the given operations."""
if mask is None:
return input
canonical_mask = _input_mask(input, mask=mask)
if callable(op):
fill_value = _reduction_identity(op.__name__, input, *args)
return _where(canonical_mask, input, fill_value)
else:
raise ValueError(
f"_combine_input_and_mask expected masked operation (got {type(op).__name__} object)"
)
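# Illustrative behaviour (editor's hedged sketch, not part of the original file; sum and amax
# refer to the masked ops defined later in this module): masked-out elements are replaced by
# the reduction identity of the target op (0 for sum, -inf for amax on floating point input).
# >>> x = torch.tensor([1.0, 2.0, 3.0])
# >>> m = torch.tensor([True, False, True])
# >>> _combine_input_and_mask(sum, x, m)
# tensor([1., 0., 3.])
# >>> _combine_input_and_mask(amax, x, m)
# tensor([1., -inf, 3.])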
@_apply_docstring_templates
def sum(
input: Tensor,
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
# __doc__ is generated by _apply_docstring_templates decorator
if dtype is None:
# promote integer types to int64 when output dtype is not specified
if input.layout == torch.sparse_csr:
if input.dtype in {
torch.uint8,
torch.bool,
torch.int8,
torch.int16,
torch.int32,
}:
# csr.to(dtype=torch.int64) is not implemented, so
# using coo.to on input to ensure the promoted dtype
input = input.to_sparse_coo().to(dtype=torch.int64).to_sparse_csr()
else:
dtype = input.dtype
else:
dtype = input.dtype
if input.dtype in {
torch.uint8,
torch.bool,
torch.int8,
torch.int16,
torch.int32,
}:
dtype = torch.int64
dim_ = _canonical_dim(dim, input.ndim)
mask_input = _combine_input_and_mask(sum, input, mask)
if input.layout == torch.strided:
return torch.sum(mask_input, dim_, bool(keepdim), dtype=dtype)
elif input.layout == torch.sparse_coo:
return _sparse_coo_scatter_reduction_helper(
torch.sum, mask_input, dim_, bool(keepdim), dtype
)
elif input.layout == torch.sparse_csr:
return torch._sparse_csr_sum(
mask_input, dim=list(dim_), keepdim=bool(keepdim), dtype=dtype
)
else:
raise ValueError(
f"masked sum expects strided, sparse_coo or sparse_csr tensor (got {input.layout} tensor)"
)
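# Illustrative usage (editor's hedged sketch, not part of the original file);
# in pytorch-master these functions are exposed as torch._masked.sum etc.
# >>> input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
# >>> mask = torch.tensor([[True, False, True], [False, False, False]])
# >>> sum(input, 1, mask=mask)   # -> tensor([-4, 0]); a fully masked row reduces to the identity 0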
@_apply_docstring_templates
def prod(
input: Tensor,
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
# __doc__ is generated by _apply_docstring_templates decorator
if dtype is None:
# promote integer types to int64 when output dtype is not specified
if input.layout == torch.sparse_csr:
if input.dtype in {
torch.uint8,
torch.bool,
torch.int8,
torch.int16,
torch.int32,
}:
# csr.to(dtype=torch.int64) is not implemented, so
# using coo.to on input to ensure the promoted dtype
input = input.to_sparse_coo().to(dtype=torch.int64).to_sparse_csr()
else:
dtype = input.dtype
else:
dtype = input.dtype
if input.dtype in {
torch.uint8,
torch.bool,
torch.int8,
torch.int16,
torch.int32,
}:
dtype = torch.int64
dim_ = _canonical_dim(dim, input.ndim)
mask_input = _combine_input_and_mask(prod, input, mask)
if input.layout == torch.strided:
# Workaround https://github.com/pytorch/pytorch/issues/56586
result = mask_input
result = result.to(dtype=dtype)
for d in reversed(dim_):
result = result.prod(dim=d, keepdim=bool(keepdim))
return result
elif input.layout == torch.sparse_coo:
if mask is None:
# See comment in the sparse_csr branch, the same issue arises for sparse_coo tensors
raise ValueError(
"masked prod expects explicit mask for sparse_coo tensor input"
)
return _sparse_coo_scatter_reduction_helper(
torch.prod, mask_input, dim_, bool(keepdim), dtype
)
elif input.layout == torch.sparse_csr:
if mask is None:
# mask is None corresponds to all-True mask. The
# unspecified elements in the CSR tensor correspond to
# zero values. Hence, the prod reduction result is
# automatically zero unless all elements are specified.
# A semi-optimal way to take this into account is to use:
#
# masked_prod(csr, ..., mask=None) == torch._sparse_csr_prod(csr, ...) * all(csr.nonzero(), ...)
#
# but that requires implementing `all` and `nonzero`
# support for sparse csr tensors.
raise ValueError(
"masked prod expects explicit mask for sparse_csr tensor input"
)
return torch._sparse_csr_prod(
mask_input, dim=list(dim_), keepdim=bool(keepdim), dtype=dtype
)
else:
raise ValueError(
f"masked prod expects strided, sparse_coo or sparse_csr tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def cumsum(
input: Tensor,
dim: int,
*,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
mask_input = _combine_input_and_mask(sum, input, mask)
if input.layout == torch.strided:
return torch.cumsum(mask_input, dim_, dtype=dtype).to(dtype=dtype)
else:
raise ValueError(
f"masked cumsum expects strided tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def cumprod(
input: Tensor,
dim: int,
*,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
mask_input = _combine_input_and_mask(prod, input, mask)
if input.layout == torch.strided:
return torch.cumprod(mask_input, dim_, dtype=dtype).to(dtype=dtype)
else:
raise ValueError(
f"masked cumprod expects strided tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def amax(
input: Tensor,
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
{reduction_identity_dtype}
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
mask_input = _combine_input_and_mask(amax, input, mask)
dim_ = _canonical_dim(dim, mask_input.ndim)
if input.layout == torch.strided:
return torch.amax(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
elif input.layout == torch.sparse_coo:
if mask is None:
# See comment in the sparse_csr branch of prod, a similar issue arises here
# where unspecified elements along a dimension may need to be reduced with the result
raise ValueError(
"masked amax expects explicit mask for sparse_coo tensor input"
)
return _sparse_coo_scatter_reduction_helper(
torch.amax, mask_input, dim_, bool(keepdim), dtype
)
elif input.layout == torch.sparse_csr:
if mask is None:
raise ValueError(
"masked amax expects explicit mask for sparse_csr tensor input"
)
return _sparse_csr_segment_reduction_helper(
torch.amax, mask_input, dim_, bool(keepdim), dtype
)
else:
raise ValueError(
f"masked amax expects strided, sparse_coo or sparse_csr tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def amin(
input: Tensor,
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
{reduction_identity_dtype}
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
mask_input = _combine_input_and_mask(amin, input, mask)
dim_ = _canonical_dim(dim, mask_input.ndim)
if input.layout == torch.strided:
return torch.amin(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
elif input.layout == torch.sparse_coo:
if mask is None:
# See comment in the sparse_csr branch of prod, a similar issue arises here
# where unspecified elements along a dimension may need to be reduced with the result
raise ValueError(
"masked amax expects explicit mask for sparse_coo tensor input"
)
return _sparse_coo_scatter_reduction_helper(
torch.amin, mask_input, dim_, bool(keepdim), dtype
)
elif input.layout == torch.sparse_csr:
if mask is None:
raise ValueError(
"masked amin expects explicit mask for sparse_csr tensor input"
)
return _sparse_csr_segment_reduction_helper(
torch.amin, mask_input, dim_, bool(keepdim), dtype
)
else:
raise ValueError(
f"masked amin expects strided, sparse_coo or sparse_csr tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def argmax(
input: Tensor,
dim: Optional[int] = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
{reduction_identity_dtype}
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
mask_input = _combine_input_and_mask(argmax, input, mask)
if input.layout == torch.strided:
return torch.argmax(mask_input, dim, bool(keepdim)).to(dtype=dtype)
else:
raise ValueError(
f"masked argmax expects strided tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def argmin(
input: Tensor,
dim: Optional[int] = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
{reduction_identity_dtype}
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
mask_input = _combine_input_and_mask(argmin, input, mask)
if input.layout == torch.strided:
return torch.argmin(mask_input, dim, bool(keepdim)).to(dtype=dtype)
else:
raise ValueError(
f"masked argmin expects strided tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def mean(
input: Tensor,
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
By definition, the identity value of a mean operation is the mean
value of the tensor. If all elements of the input tensor along given
dimension(s) :attr:`dim` are masked-out, the identity value of the
mean is undefined. Due to this ambiguity, the elements of output
tensor with strided layout, that correspond to fully masked-out
elements, have ``nan`` values.
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
if input.layout == torch.strided:
if mask is None:
# TODO: compute count analytically
count = sum(
torch.ones(input.shape, dtype=torch.int64, device=input.device),
dim,
keepdim=keepdim,
)
total = sum(input, dim, keepdim=keepdim, dtype=dtype)
else:
inmask = _input_mask(input, mask=mask)
count = sum(
inmask.new_ones(input.shape, dtype=torch.int64),
dim,
keepdim=keepdim,
mask=inmask,
)
total = sum(input, dim, keepdim=keepdim, dtype=dtype, mask=inmask)
return total / count
elif input.layout == torch.sparse_csr:
mask_input = _combine_input_and_mask(mean, input, mask)
dim_ = _canonical_dim(dim, mask_input.ndim)
if mask is None:
raise ValueError(
"masked mean expects explicit mask for sparse_csr tensor input"
)
return _sparse_csr_segment_reduction_helper(
torch.mean, mask_input, dim_, bool(keepdim), dtype
)
else:
raise ValueError(
f"masked mean expects strided or sparse_csr tensor (got {input.layout} tensor)"
)
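# Illustrative behaviour (editor's hedged sketch, not part of the original file):
# mean is computed as the masked sum divided by the count of masked-in elements,
# so a fully masked-out row yields nan.
# >>> input = torch.tensor([[1.0, 2.0, 4.0], [1.0, 1.0, 1.0]])
# >>> mask = torch.tensor([[True, False, True], [False, False, False]])
# >>> mean(input, 1, mask=mask)   # -> values 2.5 and nan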
@_apply_docstring_templates
def median(
input: Tensor,
dim: int = -1,
*,
keepdim: bool = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
By definition, the identity value of a median operation is the median
value of the tensor. If all elements of the input tensor along given
dimension(s) :attr:`dim` are masked-out, the identity value of the
median is undefined. Due to this ambiguity, the elements of output
tensor with strided layout, that correspond to fully masked-out
elements, have ``nan`` values.
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
is_float = torch.is_floating_point(input)
if not is_float:
input = input.to(dtype=torch.float)
mask_input = _combine_input_and_mask(median, input, mask)
if input.layout == torch.strided:
output = torch.nanmedian(mask_input, dim_, keepdim).values
if is_float:
return output
elif not is_float and not torch.isnan(output).any():
return output.to(dtype=dtype)
else:
raise ValueError(
"masked median expects no fully masked out rows if dtype is not floating point"
)
else:
raise ValueError(
f"masked median expects strided tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def logsumexp(
input: Tensor,
dim: DimOrDims = None,
*,
keepdim: bool = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)
mask_input = _combine_input_and_mask(logsumexp, input, mask)
if input.layout == torch.strided:
return torch.logsumexp(mask_input, dim_, keepdim=keepdim).to(dtype=dtype)
else:
raise ValueError(
f"masked logsumexp expects strided tensor (got {input.layout} tensor)"
)
# TODO: Add docstring; currently they're only set up for reductions and normalizations
# @_apply_docstring_templates
def logaddexp(
input: Tensor,
other: Tensor,
*,
dtype: Optional[DType] = None,
input_mask: Optional[Tensor] = None,
other_mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
if input.layout == torch.strided and other.layout == torch.strided:
mask_input = _combine_input_and_mask(logsumexp, input, input_mask)
mask_other = _combine_input_and_mask(logsumexp, other, other_mask)
return torch.logaddexp(mask_input, mask_other).to(dtype=dtype)
else:
raise ValueError(
f"masked logaddexp expects strided tensors (got {input.layout} tensor for input, {other.layout} for other)"
)
@_apply_docstring_templates
def norm(
input: Tensor,
ord: Optional[float] = 2.0,
dim: DimOrDims = None,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
The identity value of norm operation, which is used to start the
reduction, is ``{identity_float32}``, except for ``ord=-inf`` it is
``{identity_ord_ninf}``.
{reduction_args}
{reduction_example}"""
if dtype is None:
dtype = input.dtype
mask_input = _combine_input_and_mask(norm, input, mask, ord)
if input.layout == torch.strided:
dim_ = _canonical_dim(dim, input.ndim)
return torch.linalg.vector_norm(
mask_input, ord, dim_, bool(keepdim), dtype=dtype
)
else:
raise ValueError(
f"masked norm expects strided tensor (got {input.layout} tensor)"
)
def std_var(
input: Tensor,
dim: DimOrDims = None,
unbiased: Optional[bool] = False,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
take_sqrt: Optional[bool] = False,
) -> Tensor:
if dtype is None:
dtype = input.dtype
if not (dtype.is_floating_point or dtype.is_complex):
dtype = torch.float32
compute_dtype = dtype
if not (compute_dtype.is_floating_point or compute_dtype.is_complex):
compute_dtype = torch.float32
if input.layout == torch.strided:
if mask is None:
# TODO: compute count analytically
count = sum(
torch.ones(input.shape, dtype=torch.int64, device=input.device),
dim,
keepdim=True,
)
sample_total = sum(input, dim, keepdim=True, dtype=dtype)
else:
inmask = _input_mask(input, mask=mask)
count = sum(
inmask.new_ones(input.shape, dtype=torch.int64),
dim,
keepdim=True,
mask=inmask,
)
sample_total = sum(input, dim, keepdim=True, dtype=dtype, mask=inmask)
# TODO: replace torch.subtract/divide/square/maximum with
# masked subtract/divide/square/maximum when these will be
# available.
sample_mean = torch.divide(sample_total, count)
x = torch.subtract(input, sample_mean)
if mask is None:
total = sum(x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype)
else:
total = sum(
x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype, mask=inmask
)
if not keepdim:
count = count.reshape(total.shape)
if unbiased:
count = torch.subtract(count, 1)
count = torch.maximum(count, count.new_zeros([]))
output = torch.divide(total, count).to(dtype=dtype)
if take_sqrt:
output = torch.sqrt(output)
return output
else:
raise ValueError(
f"masked std/var expects strided tensor (got {input.layout} tensor)"
)
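# A minimal sketch (hypothetical helper, not part of the original API) of the
# count/Bessel logic used by std_var above, assuming a floating-point input,
# a boolean mask broadcastable to it, and a single int dim.
def _example_masked_var_by_hand(input: Tensor, mask: Tensor, dim: int, unbiased: bool = False) -> Tensor:
    inmask = torch.broadcast_to(mask, input.shape)
    count = inmask.sum(dim=dim, keepdim=True)            # number of valid elements
    total = (input * inmask).sum(dim=dim, keepdim=True)  # masked sum
    mean = total / count
    sq = ((input - mean) ** 2) * inmask                  # masked squared deviations
    denom = count.squeeze(dim) - 1 if unbiased else count.squeeze(dim)
    return sq.sum(dim=dim) / denom.clamp_min(0)          # nan where a slice is fully masked out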
@_apply_docstring_templates
def var(
input: Tensor,
dim: DimOrDims = None,
unbiased: Optional[bool] = False,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
The identity value of sample variance operation is undefined. The
elements of output tensor with strided layout, that correspond to
fully masked-out elements, have ``nan`` values.
{reduction_args}
{reduction_example}"""
return std_var(
input=input,
dim=dim,
unbiased=unbiased,
keepdim=keepdim,
dtype=dtype,
mask=mask,
take_sqrt=False,
)
@_apply_docstring_templates
def std(
input: Tensor,
dim: DimOrDims = None,
unbiased: Optional[bool] = False,
*,
keepdim: Optional[bool] = False,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
"""\
{reduction_signature}
{reduction_descr}
The identity value of sample standard deviation operation is undefined. The
elements of output tensor with strided layout, that correspond to
fully masked-out elements, have ``nan`` values.
{reduction_args}
{reduction_example}"""
return std_var(
input=input,
dim=dim,
unbiased=unbiased,
keepdim=keepdim,
dtype=dtype,
mask=mask,
take_sqrt=True,
)
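# A minimal sketch (hypothetical helper, not part of the original API): the
# public var/std above route through std_var, so rows whose mask is entirely
# False reduce over an empty set and come back as nan, as documented.
def _example_masked_std_fully_masked_row():
    x = torch.tensor([[1.0, 2.0, 4.0], [5.0, 6.0, 7.0]])
    m = torch.tensor([[True, True, True], [False, False, False]])
    return std(x, 1, False, mask=m)  # second entry is nan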
@_apply_docstring_templates
def softmax(
input: Tensor,
dim: int,
*,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
mask_input = _combine_input_and_mask(amax, input, mask)
if input.layout == torch.strided:
return torch.nn.functional.softmax(mask_input, dim_, dtype=dtype)
else:
raise ValueError(
f"masked softmax expects strided tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def log_softmax(
input: Tensor,
dim: int,
*,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
mask_input = _combine_input_and_mask(amax, input, mask)
if input.layout == torch.strided:
return torch.nn.functional.log_softmax(mask_input, dim_, dtype=dtype)
else:
raise ValueError(
f"masked log_softmax expects strided tensor (got {input.layout} tensor)"
)
@_apply_docstring_templates
def softmin(
input: Tensor,
dim: int,
*,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
mask_input = _combine_input_and_mask(amin, input, mask)
if input.layout == torch.strided:
return torch.nn.functional.softmin(mask_input, dim_, dtype=dtype)
else:
raise ValueError(
f"masked softmin expects strided tensor (got {input.layout} tensor)"
)
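# A minimal sketch (hypothetical helper, not part of the original API): the
# masked softmax/log_softmax/softmin above fill masked-out positions with the
# amax/amin identity before calling the regular torch.nn.functional op, so a
# fully masked-out row comes back as nan.
def _example_masked_softmax_family():
    x = torch.tensor([[-3.0, -2.0, -1.0], [0.0, 1.0, 2.0]])
    m = torch.tensor([[True, False, True], [False, False, False]])
    return softmax(x, 1, mask=m), log_softmax(x, 1, mask=m), softmin(x, 1, mask=m)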
@_apply_docstring_templates
def normalize(
input: Tensor,
ord: float,
dim: int,
*,
eps: float = 1e-12,
dtype: Optional[DType] = None,
mask: Optional[Tensor] = None,
) -> Tensor:
if dtype is None:
dtype = input.dtype
dim_ = _canonical_dim(dim, input.ndim)[0]
# TODO: eliminate mask_input as unnecessary when using masked divide.
mask_input = _combine_input_and_mask(sum, input, mask)
if input.layout == torch.strided:
nrm_ = norm(input, ord, dim, keepdim=True, dtype=dtype, mask=mask)
# TODO: replace torch.maximum with masked maximum when available.
denom = torch.maximum(nrm_, nrm_.new_full([], eps))
# TODO: replace torch.divide with masked divide when available.
return torch.divide(mask_input, denom)
else:
raise ValueError(
f"masked normalize expects strided tensor (got {input.layout} tensor)"
)
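# A minimal sketch (hypothetical helper, not part of the original API): norm
# and normalize above compose as expected, so the masked 2-norm of a
# normalized slice is ~1 wherever the slice is not fully masked out.  Values
# at masked-out positions are documented as undefined.
def _example_masked_normalize():
    x = torch.tensor([[3.0, 0.0, 4.0]])
    m = torch.tensor([[True, False, True]])
    unit = normalize(x, 2.0, 1, mask=m)
    return unit, norm(unit, 2.0, 1, mask=m)  # second result is ~tensor([1.])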
|
pytorch-master
|
torch/_masked/__init__.py
|
# -*- coding: utf-8 -*-
# This file is generated, do not modify it!
#
# To update this file, run the update masked docs script as follows:
#
# python tools/update_masked_docs.py
#
# The script must be called from an environment where the development
# version of torch package can be imported and is functional.
#
amax_docstring = """amax(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns maximum of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of maximum operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``-inf``, ``0``, and ``-2147483648``, respectively.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in maximum computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of maximum operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.amax(input, 1, mask=mask)
tensor([ -1, -9223372036854775808])
"""
amin_docstring = """amin(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns minimum of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of minimum operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``inf``, ``255``, and ``2147483647``, respectively.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in minimum computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of minimum operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.amin(input, 1, mask=mask)
tensor([ -3, 9223372036854775807])
"""
argmax_docstring = """argmax(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns argmax of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of argmax operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``-inf``, ``0``, and ``-2147483648``, respectively.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in argmax computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of argmax operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which argmax is computed.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.argmax(input, 1, mask=mask)
tensor([2, 0])
"""
argmin_docstring = """argmin(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns argmin of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of argmin operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``inf``, ``255``, and ``2147483647``, respectively.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in argmin computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of argmin operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which argmin is computed.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.argmin(input, 1, mask=mask)
tensor([0, 0])
"""
cumprod_docstring = """cumprod(input, dim, *, dtype=None, mask=None) -> Tensor
Returns cumulative_prod of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Cumprod of i-th element in ``x`` is
defined as ``prod(x[:i])``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
cumulative_prod computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the cumulative_prod output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which cumulative_prod is computed.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.cumprod(input, 1, mask=mask)
tensor([[-3., -3., 3.],
[ 1., 1., 1.]])
"""
cumsum_docstring = """cumsum(input, dim, *, dtype=None, mask=None) -> Tensor
Returns cumulative_sum of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is
defined as ``sum(x[:i])``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
cumulative_sum computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the cumulative_sum output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which cumulative_sum is computed.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.cumsum(input, 1, mask=mask)
tensor([[-3., -3., -4.],
[ 0., 0., 0.]])
"""
log_softmax_docstring = """log_softmax(input, dim, *, dtype=None, mask=None) -> Tensor
Returns log_softmax of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. LogSoftmax of i-th element in ``x`` is
defined as ``log(exp(x[i])/sum(exp(x)))``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
log_softmax computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the log_softmax output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which log_softmax is computed.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.log_softmax(input, 1, mask=mask)
tensor([[-2.1269, -inf, -0.1269],
[ nan, nan, nan]])
"""
logsumexp_docstring = """logsumexp(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns logsumexp of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of logsumexp operation, which is used to start the reduction, is ``-2147483648``.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in logsumexp computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of logsumexp operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.logsumexp(input, 1, mask=mask)
tensor([ 0, -9223372036854775808])
"""
mean_docstring = """mean(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns mean of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
By definition, the identity value of a mean operation is the mean
value of the tensor. If all elements of the input tensor along given
dimension(s) :attr:`dim` are masked-out, the identity value of the
mean is undefined. Due to this ambiguity, the elements of output
tensor with strided layout, that correspond to fully masked-out
elements, have ``nan`` values.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in mean computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of mean operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.mean(input, 1, mask=mask)
tensor([-2., nan])
"""
median_docstring = """median(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns median of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
By definition, the identity value of a median operation is the median
value of the tensor. If all elements of the input tensor along given
dimension(s) :attr:`dim` are masked-out, the identity value of the
median is undefined. Due to this ambiguity, the elements of output
tensor with strided layout, that correspond to fully masked-out
elements, have ``nan`` values.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in median computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of median operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which median is computed.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.median(input, 1, mask=mask)
tensor([-3., nan])
"""
norm_docstring = """norm(input, ord, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns norm of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of norm operation, which is used to start the
reduction, is ``0.0``, except for ``ord=-inf`` it is
``inf``.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in norm computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of norm operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
ord (int, float, optional): the order of vector norm. Default: 2.
See :func:`torch.linalg.vector_norm` for a list of supported norms.
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.norm(input, 2.0, 1, mask=mask)
tensor([3.1623, 0.0000])
"""
normalize_docstring = """normalize(input, ord, dim, *, eps=1e-12, dtype=None, mask=None) -> Tensor
Returns normalize of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Normalize of i-th element in ``x`` is
defined as ``x[i]/max(norm(x, ord), eps)``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
normalize computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the normalize output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
ord (int, float): the order of vector norm. Default: 2.
See :func:`torch.linalg.vector_norm` for a list of supported norms.
dim (int): the dimension along which normalize is computed.
Keyword args:
eps (float, optional): small value to avoid division by zero. Default: 1e-12.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.normalize(input, 2.0, 1, mask=mask)
tensor([[-0.9487, 0.0000, -0.3162],
[ 0.0000, 0.0000, 0.0000]])
"""
prod_docstring = """prod(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns product of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of product operation, which is used to start the reduction, is ``1``.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in product computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of product operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.prod(input, 1, mask=mask)
tensor([3, 1])
"""
softmax_docstring = """softmax(input, dim, *, dtype=None, mask=None) -> Tensor
Returns softmax of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Softmax of i-th element in ``x`` is
defined as ``exp(x[i])/sum(exp(x))``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
softmax computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the softmax output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which softmax is computed.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.softmax(input, 1, mask=mask)
tensor([[0.1192, 0.0000, 0.8808],
[ nan, nan, nan]])
"""
softmin_docstring = """softmin(input, dim, *, dtype=None, mask=None) -> Tensor
Returns softmin of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Softmin of i-th element in ``x`` is
defined as ``exp(-x[i])/sum(exp(-x))``.
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
softmin computation, otherwise the element is ignored.
The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.
The mask of the softmin output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int): the dimension along which softmin is computed.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]])
>>> input
tensor([[-3., -2., -1.],
[ 0., 1., 2.]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.softmin(input, 1, mask=mask)
tensor([[0.8808, 0.0000, 0.1192],
[ nan, nan, nan]])
"""
std_docstring = """std(input, dim, unbiased, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns standard_deviation of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of sample standard deviation operation is undefined. The
elements of output tensor with strided layout, that correspond to
fully masked-out elements, have ``nan`` values.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in standard_deviation computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of standard_deviation operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
unbiased (bool): when True, use Bessel’s correction, otherwise, compute
the uncorrected sample variance.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.std(input, 1, False, mask=mask)
tensor([1., nan])
"""
sum_docstring = """sum(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns sum of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of sum operation, which is used to start the reduction, is ``0``.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in sum computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of sum operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.sum(input, 1, mask=mask)
tensor([-4, 0])
"""
var_docstring = """var(input, dim, unbiased, *, keepdim=False, dtype=None, mask=None) -> Tensor
Returns variance of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.
The identity value of sample variance operation is undefined. The
elements of output tensor with strided layout, that correspond to
fully masked-out elements, have ``nan`` values.
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in variance computation, otherwise the element is
ignored.
When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of variance operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.
The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.
Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
unbiased (bool): when True, use Bessel’s correction, otherwise, compute
the uncorrected sample variance.
Keyword args:
keepdim (bool, optional): whether the output tensor has
:attr:`dim` retained or not. Default: False.
dtype (:class:`torch.dtype`, optional): the desired data type
of returned tensor. If specified, the input tensor is
casted to :attr:`dtype` before the operation is
performed. Default: None.
mask (:class:`torch.Tensor`, optional): the boolean tensor
containing the binary mask of validity of input tensor
elements.
Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
Example::
>>> input = tensor([[-3, -2, -1], [ 0, 1, 2]])
>>> input
tensor([[-3, -2, -1],
[ 0, 1, 2]])
>>> mask = tensor([[ True, False, True], [False, False, False]])
>>> mask
tensor([[ True, False, True],
[False, False, False]])
>>> torch._masked.var(input, 1, False, mask=mask)
tensor([1., nan])
"""
|
pytorch-master
|
torch/_masked/_docs.py
|
pytorch-master
|
torch/ao/__init__.py
|
|
# Variables
from ._mappings import get_dynamic_sparse_quantized_mapping
from ._mappings import get_static_sparse_quantized_mapping
# Sparsifier
from .sparsifier.base_sparsifier import BaseSparsifier
from .sparsifier.weight_norm_sparsifier import WeightNormSparsifier
from .sparsifier.nearly_diagonal_sparsifier import NearlyDiagonalSparsifier
# Scheduler
from .scheduler.base_scheduler import BaseScheduler
from .scheduler.lambda_scheduler import LambdaSL
# Parametrizations
from .sparsifier.utils import FakeSparsity
from .sparsifier.utils import module_to_fqn
from .sparsifier.utils import fqn_to_module
from .sparsifier.utils import get_arg_info_from_tensor_fqn
|
pytorch-master
|
torch/ao/sparsity/__init__.py
|
import torch
import torch.ao.nn
def get_static_sparse_quantized_mapping():
_static_sparse_quantized_mapping = dict({
torch.nn.Linear: torch.ao.nn.sparse.quantized.Linear,
})
return _static_sparse_quantized_mapping
def get_dynamic_sparse_quantized_mapping():
_dynamic_sparse_quantized_mapping = dict({
torch.nn.Linear: torch.ao.nn.sparse.quantized.dynamic.Linear,
})
return _dynamic_sparse_quantized_mapping
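# A minimal sketch (hypothetical helper, not part of the original module): one
# way such a mapping could be consumed is to scan a model and report which
# submodules have a sparse-quantized counterpart registered above.
def _example_report_sparse_quantizable(model: torch.nn.Module):
    mapping = get_static_sparse_quantized_mapping()
    return {
        name: mapping[type(child)]
        for name, child in model.named_modules()
        if type(child) in mapping
    }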
|
pytorch-master
|
torch/ao/sparsity/_mappings.py
|
from functools import reduce
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
from .base_sparsifier import BaseSparsifier
__all__ = ["WeightNormSparsifier"]
def _flat_idx_to_2d(idx, shape):
rows = idx // shape[1]
cols = idx % shape[1]
return rows, cols
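# Worked example (hypothetical, illustrative only): for a (3, 4) tensor, flat
# index 7 maps to row 7 // 4 == 1 and column 7 % 4 == 3.
def _example_flat_idx_to_2d():
    return _flat_idx_to_2d(7, (3, 4))  # -> (1, 3)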
class WeightNormSparsifier(BaseSparsifier):
r"""Weight-Norm Sparsifier
This sparsifier computes the norm of every sparse block and "zeroes-out" the
ones with the lowest norm. The level of sparsity defines how many of the
    blocks are removed.
This sparsifier is controlled by three variables:
1. `sparsity_level` defines the number of *sparse blocks* that are zeroed-out
2. `sparse_block_shape` defines the shape of the sparse blocks. Note that
the sparse blocks originate at the zero-index of the tensor.
3. `zeros_per_block` is the number of zeros that we are expecting in each
sparse block. By default we assume that all elements within a block are
zeroed-out. However, setting this variable sets the target number of
zeros per block. The zeros within each block are chosen as the *smallest
absolute values*.
Args:
sparsity_level: The target level of sparsity
sparse_block_shape: The shape of a sparse block (see note below)
zeros_per_block: Number of zeros in a sparse block
Note::
        The `sparse_block_shape` is a tuple representing (block_ROWS, block_COLS),
irrespective of what the rows / cols mean in the data tensor. That means,
if you were to sparsify a weight tensor in the nn.Linear, which has a
weight shape `(Cout, Cin)`, the `block_ROWS` would refer to the output
channels, while the `block_COLS` would refer to the input channels.
Note::
All arguments to the WeightNormSparsifier constructor are "default"
        arguments and could be overridden by the configuration provided in the
`prepare` step.
"""
def __init__(self,
sparsity_level: float = 0.5,
sparse_block_shape: Tuple[int, int] = (1, 4),
                 zeros_per_block: Optional[int] = None):
if zeros_per_block is None:
zeros_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
defaults = {
"sparsity_level": sparsity_level,
"sparse_block_shape": sparse_block_shape,
"zeros_per_block": zeros_per_block,
}
super().__init__(defaults=defaults)
def _scatter_fold_block_mask(self, output_shape, dim, indices, block_shape,
mask=None, input_shape=None, device=None):
r"""Creates patches of size `block_shape` after scattering the indices."""
if mask is None:
assert input_shape is not None
mask = torch.ones(input_shape, device=device)
mask.scatter_(dim=dim, index=indices, value=0)
mask.data = F.fold(mask, output_size=output_shape, kernel_size=block_shape, stride=block_shape)
return mask
def _make_tensor_mask(self, data, input_shape, sparsity_level, sparse_block_shape, mask=None):
r"""Creates a tensor-level mask.
Tensor-level mask is described as a mask, where the granularity of sparsification of the
        smallest patch is the sparse_block_shape. That means that for a given mask and a
sparse_block_shape, the smallest "patch" of zeros/ones could be the sparse_block_shape.
In this context, `sparsity_level` describes the fraction of sparse patches.
"""
h, w = data.shape[-2:]
block_h, block_w = sparse_block_shape
dh = (block_h - h % block_h) % block_h
dw = (block_w - w % block_w) % block_w
if mask is None:
mask = torch.ones(h, w, device=data.device)
if sparsity_level >= 1.0:
mask.data = torch.zeros_like(mask)
return mask
elif sparsity_level <= 0.0:
mask.data = torch.ones_like(mask)
return mask
values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
if values_per_block > 1:
# Reduce the data
data = F.avg_pool2d(
data[None, None, :], kernel_size=sparse_block_shape, stride=sparse_block_shape, ceil_mode=True
)
data = data.flatten()
num_blocks = len(data)
data = data.repeat(1, values_per_block, 1)
threshold_idx = int(round(sparsity_level * num_blocks))
threshold_idx = max(0, min(num_blocks - 1, threshold_idx)) # Sanity check
_, sorted_idx = torch.topk(data, k=threshold_idx, dim=2, largest=False)
# Temp reshape for mask
mask_reshape = mask.reshape(data.shape) # data might be reshaped
self._scatter_fold_block_mask(
dim=2, output_shape=(h + dh, w + dw),
indices=sorted_idx, block_shape=sparse_block_shape, mask=mask_reshape
)
mask.data = mask_reshape.squeeze().reshape(mask.shape)[:h, :w].contiguous()
return mask
def _make_block_mask(self, data, sparse_block_shape, zeros_per_block, mask=None):
r"""Creates a block-level mask.
Block-level mask is described as a mask, where the granularity of sparsification of the
largest patch is the sparse_block_shape. That means that for a given mask and a
sparse_block_shape, the sparsity is computed only within a patch of a size sparse_block_shape.
In this context the `zeros_per_block` describes the number of zeroed-out elements within a patch.
"""
if mask is None:
mask = torch.ones(data.shape, device=data.device)
h, w = data.shape[-2:]
block_h, block_w = sparse_block_shape
dh = (block_h - h % block_h) % block_h
dw = (block_w - w % block_w) % block_w
values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
if values_per_block == zeros_per_block:
# Everything should be sparsified
mask.data = torch.zeros_like(mask)
return mask
# create a new padded tensor like data (to match the block_shape)
padded_data = torch.ones(h + dh, w + dw, dtype=data.dtype, device=data.device)
padded_data.fill_(torch.nan)
padded_data[:h, :w] = data
unfolded_data = F.unfold(padded_data[None, None, :], kernel_size=sparse_block_shape, stride=sparse_block_shape)
# Temp reshape for mask
mask_reshape = mask.reshape(unfolded_data.shape)
_, sorted_idx = torch.topk(unfolded_data, k=zeros_per_block, dim=1, largest=False)
self._scatter_fold_block_mask(
dim=1, indices=sorted_idx, output_shape=padded_data.shape, block_shape=sparse_block_shape, mask=mask_reshape
)
mask.data = mask_reshape.squeeze().reshape(mask.shape)[:h, :w].contiguous()
return mask
def update_mask(self, module, tensor_name, sparsity_level, sparse_block_shape,
zeros_per_block, **kwargs):
values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
if zeros_per_block > values_per_block:
raise ValueError(
"Number of zeros per block cannot be more than " "the total number of elements in that block."
)
if zeros_per_block < 0:
raise ValueError("Number of zeros per block should be positive.")
mask = getattr(module.parametrizations, tensor_name)[0].mask
if sparsity_level <= 0 or zeros_per_block == 0:
mask.data = torch.ones_like(mask)
elif sparsity_level >= 1.0 and (zeros_per_block == values_per_block):
mask.data = torch.zeros_like(mask)
else:
ww = getattr(module, tensor_name)**2
tensor_mask = self._make_tensor_mask(
data=ww, input_shape=ww.shape, sparsity_level=sparsity_level, sparse_block_shape=sparse_block_shape
)
if values_per_block != zeros_per_block:
block_mask = self._make_block_mask(data=ww, sparse_block_shape=sparse_block_shape,
zeros_per_block=zeros_per_block)
tensor_mask = torch.logical_or(tensor_mask, block_mask)
mask.data = tensor_mask
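
# --- Editor's note (not part of the original file): a minimal, hedged usage sketch. ---
# It assumes WeightNormSparsifier is importable from torch.ao.sparsity, as the package
# layout suggests; the toy model and config below are purely illustrative.
if __name__ == "__main__":
    import torch
    from torch import nn
    from torch.ao.sparsity import WeightNormSparsifier

    model = nn.Sequential(nn.Linear(16, 16, bias=False))
    sparsifier = WeightNormSparsifier(sparsity_level=0.5,
                                      sparse_block_shape=(1, 4),
                                      zeros_per_block=4)
    # `prepare` attaches a FakeSparsity parametrization to the configured tensor,
    # `step` recomputes the mask, and `squash_mask` folds the mask into the weight.
    sparsifier.prepare(model, config=[{"tensor_fqn": "0.weight"}])
    sparsifier.step()
    sparsifier.squash_mask()
    print((model[0].weight == 0).float().mean())  # roughly 0.5, zeroed in (1, 4) blocks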
|
pytorch-master
|
torch/ao/sparsity/sparsifier/weight_norm_sparsifier.py
|
pytorch-master
|
torch/ao/sparsity/sparsifier/__init__.py
|
|
from typing import Any, Dict, Optional
from torch import nn
__all__ = [
"module_to_fqn",
"fqn_to_module",
"get_arg_info_from_tensor_fqn",
"FakeSparsity",
]
def module_to_fqn(model: nn.Module, module: nn.Module, prefix: str = "") -> Optional[str]:
"""
    Returns the fqn for a module, or None if the module is not a descendant of the model.
"""
if module is model:
return ""
for name, child in model.named_children():
fqn = module_to_fqn(child, module, ".")
if isinstance(fqn, str):
return prefix + name + fqn
return None
def fqn_to_module(model: Optional[nn.Module], path: str) -> Optional[nn.Module]:
"""
Given an fqn, returns the corresponding module or tensor or None if the fqn given by `path`
doesn't correspond to anything. Similar to model.get_submodule(path) but works for tensors.
"""
if path != "":
for name in path.split("."):
model = getattr(model, name, None)
return model
def get_arg_info_from_tensor_fqn(model: nn.Module, tensor_fqn: str) -> Dict[str, Any]:
"""
Uses tensor_fqn to obtain a dict containing module_fqn, module and tensor_name
"""
# string manip to split tensor_fqn into module_fqn and tensor_name
# if tensor_fqn is 'weight' then module_fqn and tensor_name are '' and 'weight'
# if tensor_fqn is 'linear.weight' then module_fqn and tensor_name are 'linear' and 'weight'
tensor_name = tensor_fqn.split(".")[-1]
module_fqn = tensor_fqn[: -len(tensor_name) - ("." in tensor_fqn)]
module = fqn_to_module(model, module_fqn)
return {
"module_fqn": module_fqn,
"module": module,
"tensor_name": tensor_name,
"tensor_fqn": tensor_fqn,
}
# Parametrizations
class FakeSparsity(nn.Module):
r"""Parametrization for the weights. Should be attached to the 'weight' or
    any other parameter that requires a mask to be applied to it.
Note::
Once the mask is passed, the variable should not change the id. The
contents of the mask can change, but the mask reference itself should
not.
"""
def __init__(self, mask):
super().__init__()
self.register_buffer("mask", mask)
def forward(self, x):
assert self.mask.shape == x.shape
return self.mask * x
def state_dict(self, *args, **kwargs):
        # We don't want the parametrizations to save the mask.
# That way we make sure that the linear module doesn't store the masks
# alongside their parametrizations.
return dict()
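
# --- Editor's note (not part of the original file): a small, hedged demonstration of the
# helpers above. The toy model below is invented purely for illustration.
if __name__ == "__main__":
    import torch
    from torch import nn

    model = nn.Sequential()
    model.add_module("linear", nn.Linear(4, 4))

    assert module_to_fqn(model, model.linear) == "linear"
    assert fqn_to_module(model, "linear") is model.linear

    info = get_arg_info_from_tensor_fqn(model, "linear.weight")
    assert info["module_fqn"] == "linear" and info["tensor_name"] == "weight"

    # FakeSparsity simply multiplies its input by the stored mask buffer.
    param = FakeSparsity(torch.ones(4, 4))
    assert torch.equal(param(model.linear.weight), model.linear.weight)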
|
pytorch-master
|
torch/ao/sparsity/sparsifier/utils.py
|
import torch
from . import base_sparsifier
class NearlyDiagonalSparsifier(base_sparsifier.BaseSparsifier):
r"""Nearly Diagonal Sparsifier
This sparsifier creates a nearly diagonal mask to be applied to the weight matrix.
Nearly Diagonal Matrix is a matrix that contains non-zero elements near the diagonal and the rest are zero.
    Examples of nearly diagonal matrices with degree (or nearliness) 3 and 5 follow, respectively:
1 1 0 0 1 1 1 0
1 1 1 0 1 1 1 1
0 1 1 1 1 1 1 1
0 0 1 1 0 1 1 1
    Note that a nearly diagonal matrix with degree 1 is just a matrix with only the main diagonal populated.
This sparsifier is controlled by one variable:
1. `nearliness` defines the number of non-zero diagonal lines that are closest to the main diagonal.
        Currently, only odd numbers are supported.
Note:
        This can be accelerated (vectorized) once the Spdiagonal feature (PR: #78439) or the banded matrix
        feature lands: https://stackoverflow.com/questions/52463972/generating-banded-matrices-using-numpy
Args:
nearliness: The degree of nearliness (default = 1)
"""
def __init__(self, nearliness: int = 1):
defaults = {'nearliness': nearliness}
super().__init__(defaults=defaults)
def update_mask(self, module, tensor_name, nearliness,
**kwargs):
mask = getattr(module.parametrizations, tensor_name)[0].mask
mask.data = torch.zeros_like(mask)
if nearliness <= 0:
return
tensor = getattr(module, tensor_name)
height, width = tensor.shape
if nearliness % 2 == 0:
raise ValueError("nearliness can only be an odd number")
dist_to_diagonal = nearliness // 2
# check
if dist_to_diagonal >= min(height, width):
raise ValueError("nearliness cannot be larger than the dimensions of tensor.")
for row in range(0, height):
            # Bounds of entries that need to be set to 1
low = max(0, row - dist_to_diagonal)
high = min(width, row + dist_to_diagonal + 1)
mask[row, low:high].fill_(1)
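
# --- Editor's note (not part of the original file): a hedged usage sketch; the 4x4 layer
# is illustrative and mirrors the degree-3 example from the class docstring above.
if __name__ == "__main__":
    from torch import nn

    model = nn.Sequential(nn.Linear(4, 4, bias=False))
    sparsifier = NearlyDiagonalSparsifier(nearliness=3)
    sparsifier.prepare(model, config=[{"tensor_fqn": "0.weight"}])
    sparsifier.step()
    # The mask lives on the FakeSparsity parametrization attached by `prepare`.
    mask = model[0].parametrizations.weight[0].mask
    print(mask)  # expected: the banded 4x4 pattern shown in the class docstring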
|
pytorch-master
|
torch/ao/sparsity/sparsifier/nearly_diagonal_sparsifier.py
|
import abc
import copy
from collections import defaultdict
from typing import Any, Dict, Optional, Set, Tuple, List, Type
import torch
from torch import nn
from torch.nn.utils import parametrize
from .utils import (
FakeSparsity,
get_arg_info_from_tensor_fqn,
module_to_fqn,
)
__all__ = ["BaseSparsifier"]
SUPPORTED_MODULES = {
nn.Linear
}
KEYS_NOT_IN_STATE_DICT = ["module", "module_fqn", "tensor_name"]
__all__ = ["BaseSparsifier"]
# TODO update desc with new config args
class BaseSparsifier(abc.ABC):
r"""Base class for all sparsifiers.
Abstract methods that need to be implemented:
- update_mask: Function to compute a new mask for all keys in the
`groups`.
Args:
- model [nn.Module]: model to configure. The model itself is not saved
but used for the state_dict saving / loading.
- config [list]: configuration elements should be a dict map that includes
`tensor_fqn` of tensors to sparsify
- defaults [dict]: default configurations will be attached to the
configuration. Only the keys that don't exist in the `config` will
be updated.
Example::
>>> # xdoctest: +SKIP("Can't instantiate abstract class BaseSparsifier with abstract method update_mask")
        >>> config = [{'tensor_fqn': 'layer1.weight'}, {'tensor_fqn': 'linear2.weight2', 'sparsity_level': 0.5}]
>>> defaults = {'sparsity_level': 0.7}
>>> # model.layer1.weight will have `sparsity_level` = 0.7 (getting default)
>>> sparsifier = BaseSparsifier(config, defaults)
"""
def __init__(self, defaults: Optional[Dict[str, Any]] = None):
super().__init__()
self.defaults: Dict[str, Any] = defaults or dict()
self.state: Dict[str, Dict] = defaultdict(dict)
self.groups: List[Dict[str, Any]] = []
self.enable_mask_update = True
def __getstate__(self) -> Dict[str, Any]:
return {
'defaults': self.defaults,
'state': self.state,
'groups': self.groups,
}
def __setstate__(self, state: Dict[str, Dict[str, Any]]) -> None:
self.__dict__.update(state)
def __repr__(self):
format_string = self.__class__.__name__ + ' ('
for i, sparse_args in enumerate(self.groups):
module = sparse_args['module']
format_string += '\n'
format_string += f'\tGroup {i}\n'
format_string += f'\t module: {module}\n'
for key in sorted(sparse_args.keys()):
if key == "module":
continue
format_string += f"\t {key}: {sparse_args[key]}\n"
format_string += ")"
return format_string
def state_dict(self) -> Dict[str, Any]:
r"""Returns the state of the optimizer as a :class:`dict`.
It contains:
* state - current state of the sparsification.
* groups - a list containing all sparsity configuration groups
with the key 'tensor_fqn' specifying the path to the sparsified tensor within a model
TODO: Need a clean way of loading the state of the "prepared" module
"""
groups: List[Dict[str, Any]] = [
            dict(filter(lambda key_value: key_value[0] not in KEYS_NOT_IN_STATE_DICT, mg.items()))
for mg in self.groups
]
return {
'state': self.state,
'groups': groups,
}
def load_state_dict(self, state_dict: Dict[str, Any], strict: bool = True):
groups = copy.deepcopy(state_dict['groups'])
states = state_dict['state']
for tensor_fqn, s in states.items():
arg_info = get_arg_info_from_tensor_fqn(self.model, tensor_fqn)
module = arg_info["module"]
tensor_name = arg_info["tensor_name"]
if strict and module is None:
raise RuntimeError(f"Error loading {tensor_fqn} into the model")
found = False
for p in module.parametrizations[tensor_name]:
if isinstance(p, FakeSparsity):
found = True
break
if not found:
p = FakeSparsity(torch.ones(getattr(module, tensor_name).shape))
parametrize.register_parametrization(module, tensor_name, p)
if s.get("mask", None) is not None:
mask = s.pop("mask")
p.mask = mask
for mg in groups:
if mg["tensor_fqn"] == tensor_fqn:
mg.update(arg_info)
self.__setstate__({"state": states, "groups": groups})
def make_config_from_model(
self,
model: nn.Module,
SUPPORTED_MODULES: Set[Type] = SUPPORTED_MODULES,
) -> None:
self.config = []
stack = [model]
while stack:
module = stack.pop()
for name, child in module.named_children():
if type(child) in SUPPORTED_MODULES:
module_fqn = module_to_fqn(model, child)
assert isinstance(module_fqn, str) # for mypy
self.config.append(
{"tensor_fqn": module_fqn + ".weight"}
)
else:
stack.append(child)
def prepare(self, model, config):
r"""Prepares a model, by adding the parametrizations.
Note::
The model is modified inplace. If you need to preserve the original
model, use copy.deepcopy.
"""
self.model = model # TODO: Need to figure out how to load without this.
self.config = config
# If no config -- try getting all the supported layers
if self.config is None:
self.make_config_from_model(model)
# TODO: Remove the configuration by reference ('module')
for module_config in self.config:
assert isinstance(module_config, dict), (
"config elements should be dicts not modules i.e.:"
"[{`tensor_fqn`: `foo.bar.weight`}, {`tensor_fqn`: ... }, ...]"
)
assert isinstance(self.defaults, Dict) # for mypy
local_args = copy.deepcopy(self.defaults)
local_args.update(module_config)
tensor_fqn = local_args.get("tensor_fqn", None)
assert tensor_fqn is not None, (
"tensor_fqn is a required argument in the sparsity config which"
"replaces previous `module` and [module]`fqn` arguments"
)
# populate all information from tensor_fqn
info_from_tensor_fqn = get_arg_info_from_tensor_fqn(model, tensor_fqn)
# check that whatever was put into local_args agrees with what was obtained
# from tensor_fqn
for key in info_from_tensor_fqn.keys():
if key in local_args:
assert (
info_from_tensor_fqn[key] == local_args[key]
or (
key == "tensor_fqn"
and "." + info_from_tensor_fqn[key] == local_args[key]
)
# info_from_tensor_fqn will chop leading '.' from tensor_fqn so ignore that
), (
"Given both `{}` and `tensor_fqn` in the config, it is expected them to "
"agree!".format(key)
)
local_args.update(info_from_tensor_fqn)
self.groups.append(local_args)
self._prepare()
def _prepare(self, *args, **kwargs):
r"""Adds mask parametrization to the layer weight
"""
for config in self.groups:
module = config['module']
tensor_name = config['tensor_name']
parametrization = config.get('parametrization', FakeSparsity)
mask = config.get('mask', torch.ones_like(getattr(module, tensor_name)))
self.state[config['tensor_fqn']]['mask'] = mask
parametrize.register_parametrization(module, tensor_name, parametrization(mask))
def squash_mask(self,
params_to_keep: Optional[Tuple[str, ...]] = None,
params_to_keep_per_layer: Optional[Dict[str, Tuple[str, ...]]] = None,
*args, **kwargs):
r"""Squashes the sparse masks into the appropriate tensors.
If either the `params_to_keep` or `params_to_keep_per_layer` is set,
the module will have a `sparse_params` dict attached to it.
Args:
params_to_keep: List of keys to save in the module or a dict
representing the modules and keys that will have
sparsity parameters saved
params_to_keep_per_layer: Dict to specify the params that should be
saved for specific layers. The keys in the dict
should be the module fqn, while the values should
be a list of strings with the names of the variables
to save in the `sparse_params`
Examples:
>>> # xdoctest: +SKIP("locals are undefined")
>>> # Don't save any sparse params
>>> sparsifier.squash_mask()
>>> hasattr(model.submodule1, 'sparse_params')
False
>>> # Keep sparse params per layer
>>> sparsifier.squash_mask(
... params_to_keep_per_layer={
... 'submodule1.linear1': ('foo', 'bar'),
... 'submodule2.linear42': ('baz',)
... })
>>> print(model.submodule1.linear1.sparse_params)
{'foo': 42, 'bar': 24}
>>> print(model.submodule2.linear42.sparse_params)
{'baz': 0.1}
>>> # Keep sparse params for all layers
>>> sparsifier.squash_mask(params_to_keep=('foo', 'bar'))
>>> print(model.submodule1.linear1.sparse_params)
{'foo': 42, 'bar': 24}
>>> print(model.submodule2.linear42.sparse_params)
{'foo': 42, 'bar': 24}
>>> # Keep some sparse params for all layers, and specific ones for
>>> # some other layers
>>> sparsifier.squash_mask(
... params_to_keep=('foo', 'bar'),
... params_to_keep_per_layer={
... 'submodule2.linear42': ('baz',)
... })
>>> print(model.submodule1.linear1.sparse_params)
{'foo': 42, 'bar': 24}
>>> print(model.submodule2.linear42.sparse_params)
{'foo': 42, 'bar': 24, 'baz': 0.1}
"""
for config in self.groups:
module = config['module']
tensor_name = config['tensor_name']
parametrize.remove_parametrizations(module, tensor_name,
leave_parametrized=True)
sparse_params = dict()
if params_to_keep is not None:
global_params = {k: config[k] for k in params_to_keep}
sparse_params.update(global_params)
if params_to_keep_per_layer is not None:
params = params_to_keep_per_layer.get(config["module_fqn"], None)
if params is not None:
per_layer_params = {k: config[k] for k in params}
sparse_params.update(per_layer_params)
if sparse_params:
# TODO handle multiple tensor being quantized on a single module, where to store sparse_params?
module.sparse_params = sparse_params
def convert(self):
# TODO: Call the torch.ao.utils.convert in here
raise NotImplementedError(
"`convert` is not implemented. Please, use "
"`torch.ao.utils.convert` instead."
)
def step(self, use_path: bool = True) -> None:
if not self.enable_mask_update:
return
with torch.no_grad():
for config in self.groups:
self.update_mask(**config)
@abc.abstractmethod
def update_mask(self, module: nn.Module, tensor_name: str, **kwargs):
pass
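
# --- Editor's note (not part of the original file): a hedged sketch of a concrete subclass.
# It zeroes out the smallest-magnitude entries of each configured tensor; the class name,
# toy model and sparsity level are illustrative only.
class _MagnitudeSparsifier(BaseSparsifier):
    def __init__(self, sparsity_level: float = 0.5):
        super().__init__(defaults={"sparsity_level": sparsity_level})

    def update_mask(self, module, tensor_name, sparsity_level, **kwargs):
        mask = getattr(module.parametrizations, tensor_name)[0].mask
        weight = getattr(module, tensor_name)
        k = int(sparsity_level * weight.numel())
        mask.data = torch.ones_like(mask)
        if k > 0:
            # zero the k entries with the smallest absolute value
            _, idx = torch.topk(weight.abs().flatten(), k, largest=False)
            mask.data.view(-1)[idx] = 0


if __name__ == "__main__":
    model = nn.Sequential(nn.Linear(8, 8, bias=False))
    sparsifier = _MagnitudeSparsifier(sparsity_level=0.25)
    sparsifier.prepare(model, config=[{"tensor_fqn": "0.weight"}])
    sparsifier.step()
    sparsifier.squash_mask()
    print((model[0].weight == 0).float().mean())  # roughly 0.25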
|
pytorch-master
|
torch/ao/sparsity/sparsifier/base_sparsifier.py
|
pytorch-master
|
torch/ao/sparsity/_experimental/__init__.py
|
|
import abc
import torch
from typing import Optional, Tuple, List, Any, Dict
from ...sparsifier import base_sparsifier
from collections import defaultdict
from torch import nn
import copy
from ...sparsifier import utils
from torch.nn.utils import parametrize
import sys
import warnings
if not sys.warnoptions:
# to suppress repeated warnings when being used in a training loop.
warnings.simplefilter("once")
__all__ = ['BaseDataSparsifier']
EMBEDDING_TYPES = {
nn.Embedding,
nn.EmbeddingBag,
}
SUPPORTED_TYPES = {
torch.Tensor,
nn.Parameter,
*EMBEDDING_TYPES,
}
class _Container(nn.Module):
def __init__(self):
super().__init__()
class BaseDataSparsifier(base_sparsifier.BaseSparsifier):
r"""
Base Data Sparsifier class for all Data sparsifiers.
The abstract class accepts raw torch tensors / embedding / embedding bags (refer to SUPPORTED_TYPES above)
to prepare for sparsification.
    In this case, the mask (and parametrizations) are owned by the class and not by the user.
Specifically, the container object inside the class maintains the mask and parametrizations of the input data
Args:
data_list (list of tuples)
list of (name, data) tuples to sparsify. Lookup SUPPORTED_TYPES
for type of data. Internally, a container module handles the data sparsification.
defaults (dict)
default configurations will be attached to the
configuration. Only the keys that don't exist in the `config` will
be updated.
Example::
>>> # xdoctest: +SKIP
>>> data_list = [('tensor_1', torch.randn(3,3)), ('tensor_2', torch.randn(4,4))]
>>> defaults = {'sparsity_level': 0.7}
>>> sparsifier = DerivedDataSparsifier(data_list = data_list, **defaults) # Some sparsifier that inherits BaseDataSparsifier
>>> new_tensor_to_add = {'name': 'tensor_3', 'data': torch.randn(5,5), 'sparsity_level': 0.3}
>>> sparsifier.add_data(**new_tensor_to_add)
>>> # tensor_1 and tensor_2 will have sparsity_level of 0.7 but tensor_3 will have sparsity_level=0.3
"""
def __init__(self, data_list: Optional[List[Tuple[str, Any]]] = None, **defaults):
super().__init__(defaults=defaults)
self._container = _Container()
self.data_groups: Dict[str, Dict] = defaultdict(dict) # name -> {**config}
if data_list is not None:
# add data with default config here
[self.add_data(name, data, **self.defaults) for name, data in data_list]
def prepare(self):
raise NotImplementedError("this function is undefined for this class")
def _extract_weight(self, data):
# extract the weight parameter instead of underlying data
if type(data) in [torch.Tensor, nn.Parameter]:
return data
elif type(data) in EMBEDDING_TYPES:
return data.weight
def add_data(self, name: str, data, reuse_mask=True, **config):
r""" Configures and parametrizes the internal container model with name and data.
**Note**:
1. If the data with name already exists, it replaces the data.
2. While replacing, the old mask is reused when `reuse_mask=True`
3. If `reuse_mask=True`, then the replacing data needs to have the same shape as that of old data.
4. By default, the config of the replaced data is used as config for the replacing data, unless something
is specified in the config dictionary.
"""
assert type(data) in SUPPORTED_TYPES, \
"specified data type not supported at the moment"
local_args = copy.deepcopy(self.defaults)
local_args.update(config)
weight = self._extract_weight(data)
# Bookkeeping in the container class
mask = local_args.get('mask', torch.ones_like(weight))
param_class = local_args.get('parametrization', utils.FakeSparsity)
if name in self.state:
# If the named data already exists - replace
warnings.warn("Replacing existing data of the same name. - Did you mean a different name?")
# reuse old config
old_args = self.data_groups[name]
local_args = copy.deepcopy(old_args)
local_args.update(config)
if reuse_mask:
current_data = self.get_data(name=name)
assert weight.shape == current_data.shape, \
"to retain the old mask, the shape of the new data must be the same as the previous one"
mask = self.get_mask(name=name) # reuse mask instead of creating a new one
self._delete_data(name=name)
# parameter creates a deepcopy of the weight inside, so create a buffer
self._container.register_buffer(name=name, tensor=weight)
parametrize.register_parametrization(self._container, name, param_class(mask))
self.state[name]['mask'] = mask
self.data_groups[name] = local_args
return getattr(self._container, name)
def get_data(self, name: str, return_original: bool = True):
r"""Returns weight tensor (or data)
Args:
- name: name of the data to be returned
            - return_original: if True, returns the weight tensor without applying the parametrization;
                otherwise returns the sparsified (parametrized) version
"""
if name not in self.data_groups:
raise ValueError("data with specified name does not exist")
if return_original:
if not parametrize.is_parametrized(self._container, name):
raise ValueError("mask squashed - original mask value does not exist")
data = getattr(self._container.parametrizations, name).original
return data
else:
return getattr(self._container, name)
def _convert_mask(self, states, sparse_coo=True):
r"""Converts the mask to sparse coo or dense tensors depending on the `sparse_coo` argument.
"""
states = copy.deepcopy(states)
for _, state in states.items():
if sparse_coo:
state['mask'] = state['mask'].to_sparse_coo()
else:
state['mask'] = state['mask'].to_dense()
return states
def state_dict(self):
r"""Returns the state of the optimizer as a :class:`dict`.
It contains:
* state - contains name -> mask mapping.
* data_groups - a list containing all sparsity configuration groups
with the key name specifying the name of the data
* container_state_dict - the state dictionary of the internal
container model used for sparsification
"""
state = self._convert_mask(self.state)
return {
'state': state,
'data_groups': self.data_groups,
'_container': self._container.state_dict()
}
def _load_container_from_state(self, states, data_groups, container_state_dict):
r"""This restores the state of the container specifically based on the data present in state and data_groups
If the data was parametrized, then the data would be added to the container and then parametrized,
        otherwise the attribute is simply added to the container.
"""
for name, state in states.items():
config_name = data_groups.get(name, None)
if config_name is None:
raise RuntimeError(f"Error loading {name}")
# check if the data with such a name was parametrized, if so parametrize
# otherwise just set the attribute and continue
parametrized_name = f'parametrizations.{name}.original'
parametrized = False
data = container_state_dict.get(name, None)
if name in container_state_dict:
# the parametrization was probably removed for this
data = container_state_dict.get(name)
elif parametrized_name in container_state_dict:
# so the weight was parametrized
data = container_state_dict.get(parametrized_name)
parametrized = True
else:
raise RuntimeError(f"Error loading {name}")
self._container.register_buffer(name=name, tensor=data)
if parametrized:
# register parameter if parametrized
mask = state.get('mask', torch.ones_like(data))
param_class = data_groups.get('parametrization', utils.FakeSparsity) # change once public_api for utils is fixed!
parametrize.register_parametrization(self._container, name, param_class(mask))
def load_state_dict(self, state_dict, strict=True):
r"""The load_state_dict() restores the state of the sparsifier based on the state_dict
Args:
            * state_dict - the dictionary to which the current sparsifier needs to be restored
* strict - If True - the sparsifier is reset and is restored exactly to the state in state_dict.
If False - the current sparsifier is not reset before loading the state_dict i.e. data added
before loading the state_dict is not erased.
"""
states = copy.deepcopy(state_dict['state'])
data_groups = copy.deepcopy(state_dict['data_groups'])
container_state_dict = copy.deepcopy(state_dict['_container'])
states = self._convert_mask(states, sparse_coo=False) # convert sparse coo mask to dense
if strict:
# if strict load -> then reset container
self._container = _Container()
self._load_container_from_state(states, data_groups, container_state_dict)
if not strict:
states.update(self.state)
data_groups.update(self.data_groups)
self.__setstate__({'state': states, 'data_groups': data_groups})
def __setstate__(self, state):
if '_container' in state: # If container object is in state then load model
container_dict = state.pop('_container')
self._container = _Container()
state['state'] = self._convert_mask(state['state'], sparse_coo=False) # convert sparse coo mask to dense
self._load_container_from_state(state['state'], state['data_groups'], container_dict)
self.__dict__.update(state)
def __getstate__(self):
state = self._convert_mask(self.state)
return {
'defaults': self.defaults,
'state': state,
'data_groups': self.data_groups,
'_container': self._container.state_dict()
}
def __repr__(self):
format_string = self.__class__.__name__ + ' ('
for name, sparse_args in self.data_groups.items():
format_string += '\n'
format_string += '\tData Group\n'
format_string += f'\t name: {name}\n'
for key in sorted(sparse_args.keys()):
if key == 'data':
continue
format_string += f'\t {key}: {sparse_args[key]}\n'
format_string += ')'
return format_string
def get_mask(self, name: str):
if name not in self.state:
raise ValueError("data with specified name does not exist")
return self.state[name]['mask']
def squash_mask(self, *args, leave_parametrized=True, names=None, **kwargs):
r"""Squashes the sparse masks into the appropriate tensors. Also, accepts list of strings
to squash mask for. If none, squashes mask for all the keys
kwargs:
* names: list of strings to squash mask for
            * leave_parametrized: if True - applies the mask before squashing
                if False - does not apply the mask before squashing
"""
if names is None:
names = list(self.data_groups.keys())
for name in names:
parametrize.remove_parametrizations(self._container, name, leave_parametrized=leave_parametrized)
def step(self):
if not self.enable_mask_update:
return
with torch.no_grad():
for name, config in self.data_groups.items():
# get non-sparsified data
data = self.get_data(name)
# need name for the mask otherwise can directly pass mask?
self.update_mask(name, data, **config)
@abc.abstractmethod
def update_mask(self, name, data, **kwargs):
pass
def _delete_data(self, name):
"""Detaches some data from the sparsifier.
Args:
name (str)
Name of the data to be removed from the sparsifier
Note:
Currently private. Kind of used as a helper function when replacing data of the same name
"""
self.squash_mask(names=[name], leave_parametrized=False) # do not apply the mask while deleting
delattr(self._container, name)
self.state.pop(name)
self.data_groups.pop(name)
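
# --- Editor's note (not part of the original file): a hedged sketch of a minimal concrete
# data sparsifier built on the class above. The class name, tensor and sparsity level are
# illustrative only.
class _ThresholdDataSparsifier(BaseDataSparsifier):
    def __init__(self, data_list=None, sparsity_level: float = 0.5):
        super().__init__(data_list=data_list, sparsity_level=sparsity_level)

    def update_mask(self, name, data, sparsity_level, **kwargs):
        mask = self.get_mask(name)
        k = int(sparsity_level * data.numel())
        mask.data = torch.ones_like(mask)
        if k > 0:
            # zero the k entries with the smallest absolute value
            _, idx = torch.topk(data.abs().flatten(), k, largest=False)
            mask.data.view(-1)[idx] = 0


if __name__ == "__main__":
    sparsifier = _ThresholdDataSparsifier(data_list=[("t1", torch.randn(4, 4))],
                                          sparsity_level=0.5)
    sparsifier.step()
    # Without `return_original`, the parametrized (masked) tensor is returned.
    sparsified = sparsifier.get_data("t1", return_original=False)
    print((sparsified == 0).float().mean())  # roughly 0.5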
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/base_data_sparsifier.py
|
from .base_data_sparsifier import BaseDataSparsifier
from .data_norm_sparsifier import DataNormSparsifier
__all__ = [
"BaseDataSparsifier",
"DataNormSparsifier",
]
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/__init__.py
|
import torch
from torch.nn import functional as F
from functools import reduce
from typing import Tuple, Any, List
from .base_data_sparsifier import BaseDataSparsifier
__all__ = ['DataNormSparsifier']
class DataNormSparsifier(BaseDataSparsifier):
r"""L1-Norm Sparsifier
This sparsifier computes the *L1-norm* of every sparse block and "zeroes-out" the
ones with the lowest norm. The level of sparsity defines how many of the
    blocks are removed.
This sparsifier is controlled by three variables:
1. `sparsity_level` defines the number of *sparse blocks* that are zeroed-out
2. `sparse_block_shape` defines the shape of the sparse blocks. Note that
the sparse blocks originate at the zero-index of the tensor.
3. `zeros_per_block` is the number of zeros that we are expecting in each
sparse block. By default we assume that all elements within a block are
zeroed-out. However, setting this variable sets the target number of
zeros per block. The zeros within each block are chosen as the *smallest
absolute values*.
Args:
sparsity_level: The target level of sparsity
sparse_block_shape: The shape of a sparse block
zeros_per_block: Number of zeros in a sparse block
Note::
All arguments to the DataNormSparsifier constructor are "default"
        arguments and can be overridden by the configuration provided in the
`add_data` step.
"""
def __init__(self, data_list: List[Tuple[str, Any]] = None, sparsity_level: float = 0.5,
sparse_block_shape: Tuple[int, int] = (1, 4),
zeros_per_block: int = None, norm: str = 'L1'):
if zeros_per_block is None:
zeros_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
assert norm in ['L1', 'L2'], "only L1 and L2 norm supported at the moment"
defaults = {'sparsity_level': sparsity_level, 'sparse_block_shape': sparse_block_shape,
'zeros_per_block': zeros_per_block}
self.norm = norm
super().__init__(data_list=data_list, **defaults)
def __get_scatter_folded_mask(self, data, dim, indices, output_size, sparse_block_shape):
mask = torch.ones_like(data)
mask.scatter_(dim=dim, index=indices, value=0) # zeroing out
mask = F.fold(mask, output_size=output_size, kernel_size=sparse_block_shape,
stride=sparse_block_shape)
mask = mask.to(torch.int8)
return mask
def __get_block_level_mask(self, data,
sparse_block_shape, zeros_per_block):
# Assume data is a squeezed tensor
height, width = data.shape[-2], data.shape[-1]
block_height, block_width = sparse_block_shape
values_per_block = block_height * block_width
# just return zeros if zeroing all elements in block
if values_per_block == zeros_per_block:
return torch.zeros_like(data, dtype=torch.int8)
# creating additional height and width to support padding
dh = (block_height - height % block_height) % block_height
dw = (block_width - width % block_width) % block_width
# create a new padded tensor like data (to match the block_shape)
padded_data = torch.ones(height + dh, width + dw, dtype=data.dtype, device=data.device)
padded_data = padded_data * torch.nan # can also be replaced with 0 to stop the removal of edge data
padded_data[0:height, 0:width] = data
unfolded_data = F.unfold(padded_data[None, None, :], kernel_size=sparse_block_shape,
stride=sparse_block_shape)
_, sorted_idx = torch.sort(unfolded_data, dim=1)
sorted_idx = sorted_idx[:, :zeros_per_block, :] # zero out zeros_per_block number of elements
mask = self.__get_scatter_folded_mask(data=unfolded_data, dim=1, indices=sorted_idx, output_size=padded_data.shape,
sparse_block_shape=sparse_block_shape)
mask = mask.squeeze(0).squeeze(0)[:height, :width].contiguous() # remove padding and make contiguous
return mask
def __get_data_level_mask(self, data, sparsity_level,
sparse_block_shape):
height, width = data.shape[-2], data.shape[-1]
block_height, block_width = sparse_block_shape
dh = (block_height - height % block_height) % block_height
dw = (block_width - width % block_width) % block_width
data_norm = F.avg_pool2d(data[None, None, :], kernel_size=sparse_block_shape,
stride=sparse_block_shape, ceil_mode=True)
values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
data_norm = data_norm.flatten()
num_blocks = len(data_norm)
data_norm = data_norm.repeat(1, values_per_block, 1) # get similar shape after unfold
_, sorted_idx = torch.sort(data_norm, dim=2)
threshold_idx = round(sparsity_level * num_blocks) # number of blocks to remove
sorted_idx = sorted_idx[:, :, :threshold_idx]
mask = self.__get_scatter_folded_mask(data=data_norm, dim=2, indices=sorted_idx,
output_size=(height + dh, width + dw),
sparse_block_shape=sparse_block_shape)
mask = mask.squeeze(0).squeeze(0)[:height, :width] # squeeze only the first 2 dimension
return mask
def update_mask(self, name, data, sparsity_level,
sparse_block_shape, zeros_per_block, **kwargs):
values_per_block = reduce((lambda x, y: x * y), sparse_block_shape)
if zeros_per_block > values_per_block:
raise ValueError("Number of zeros per block cannot be more than "
"the total number of elements in that block.")
if zeros_per_block < 0:
raise ValueError("Number of zeros per block should be positive.")
if self.norm == 'L1':
data_norm = torch.abs(data).squeeze() # absolute value based (L1)
else:
data_norm = (data * data).squeeze() # square every element for L2
        if len(data_norm.shape) > 2:  # only supports 2-dimensional data at the moment
raise ValueError("only supports 2-D at the moment")
elif len(data_norm.shape) == 1: # in case the data is bias (or 1D)
data_norm = data_norm[None, :]
mask = self.get_mask(name)
        if sparsity_level <= 0 or zeros_per_block == 0:
            mask.data = torch.ones_like(mask)
        elif sparsity_level >= 1.0 and (zeros_per_block == values_per_block):
            mask.data = torch.zeros_like(mask)
        else:
            # Fetch the high level mask that zeros out entire blocks
            data_lvl_mask = self.__get_data_level_mask(data=data_norm, sparsity_level=sparsity_level,
                                                       sparse_block_shape=sparse_block_shape)
            # Fetch block level mask that zeros out 'zeros_per_block' number of elements in every block
            block_lvl_mask = self.__get_block_level_mask(data=data_norm, sparse_block_shape=sparse_block_shape,
                                                         zeros_per_block=zeros_per_block)
            # zero out the entries inside those blocks whose block is sparsified
            mask.data = torch.where(data_lvl_mask == 1, data_lvl_mask, block_lvl_mask)
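
# --- Editor's note (not part of the original file): a hedged usage sketch; the tensor size
# and sparsity settings are illustrative only.
if __name__ == "__main__":
    sparsifier = DataNormSparsifier(sparsity_level=0.8, sparse_block_shape=(1, 4),
                                    zeros_per_block=4, norm='L1')
    sparsifier.add_data(name='emb_weight', data=torch.randn(8, 16))
    sparsifier.step()
    # Without `return_original`, the parametrized (masked) tensor is returned.
    sparsified = sparsifier.get_data(name='emb_weight', return_original=False)
    print((sparsified == 0).float().mean())  # roughly 0.8, zeroed in (1, 4) blocks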
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/data_norm_sparsifier.py
|
import torch
import torch.nn as nn
from torch.ao.sparsity.sparsifier.utils import module_to_fqn, fqn_to_module
from typing import Dict, List
SUPPORTED_MODULES = {
nn.Embedding,
nn.EmbeddingBag
}
def _fetch_all_embeddings(model):
"""Fetches Embedding and EmbeddingBag modules from the model
"""
embedding_modules = []
stack = [model]
while stack:
module = stack.pop()
for _, child in module.named_children():
fqn_name = module_to_fqn(model, child)
if type(child) in SUPPORTED_MODULES:
embedding_modules.append((fqn_name, child))
else:
stack.append(child)
return embedding_modules
def post_training_sparse_quantize(model,
data_sparsifier_class,
sparsify_first=True,
select_embeddings: List[nn.Module] = None,
**sparse_config):
"""Takes in a model and applies sparsification and quantization to only embeddings & embeddingbags.
The quantization step can happen before or after sparsification depending on the `sparsify_first` argument.
Args:
- model (nn.Module)
model whose embeddings needs to be sparsified
- data_sparsifier_class (type of data sparsifier)
Type of sparsification that needs to be applied to model
- sparsify_first (bool)
if true, sparsifies first and then quantizes
otherwise, quantizes first and then sparsifies.
- select_embeddings (List of Embedding modules)
            List of embedding modules in the model to be sparsified & quantized.
            If None, all embedding modules will be sparsified.
- sparse_config (Dict)
config that will be passed to the constructor of data sparsifier object.
Note:
1. When `sparsify_first=False`, quantization occurs first followed by sparsification.
- before sparsifying, the embedding layers are dequantized.
- scales and zero-points are saved
- embedding layers are sparsified and `squash_mask` is applied
- embedding weights are requantized using the saved scales and zero-points
2. When `sparsify_first=True`, sparsification occurs first followed by quantization.
- embeddings are sparsified first
- quantization is applied on the sparsified embeddings
"""
data_sparsifier = data_sparsifier_class(**sparse_config)
# if select_embeddings is None, perform it on all embeddings
if select_embeddings is None:
embedding_modules = _fetch_all_embeddings(model)
else:
embedding_modules = []
        assert isinstance(select_embeddings, List), "select_embeddings must be a list of embedding modules"
        for emb in select_embeddings:
            assert type(emb) in SUPPORTED_MODULES, "select_embeddings must contain only Embedding or EmbeddingBag modules"
fqn_name = module_to_fqn(model, emb)
assert fqn_name is not None, "the embedding modules must be part of input model"
embedding_modules.append((fqn_name, emb))
if sparsify_first:
# sparsify
for name, emb_module in embedding_modules:
valid_name = name.replace('.', '_')
data_sparsifier.add_data(name=valid_name, data=emb_module)
data_sparsifier.step()
data_sparsifier.squash_mask()
# quantize
for _, emb_module in embedding_modules:
emb_module.qconfig = torch.ao.quantization.float_qparams_weight_only_qconfig
torch.quantization.prepare(model, inplace=True)
torch.quantization.convert(model, inplace=True)
else:
# quantize
for _, emb_module in embedding_modules:
emb_module.qconfig = torch.ao.quantization.float_qparams_weight_only_qconfig
torch.quantization.prepare(model, inplace=True)
torch.quantization.convert(model, inplace=True)
# retrieve scale & zero_points
quantize_params: Dict[str, Dict] = {'scales': {}, 'zero_points': {},
'dequant_weights': {}, 'axis': {},
'dtype': {}}
for name, _ in embedding_modules:
quantized_emb = fqn_to_module(model, name)
assert quantized_emb is not None # satisfy mypy
quantized_weight = quantized_emb.weight() # type: ignore[operator]
quantize_params['scales'][name] = quantized_weight.q_per_channel_scales()
quantize_params['zero_points'][name] = quantized_weight.q_per_channel_zero_points()
quantize_params['dequant_weights'][name] = torch.dequantize(quantized_weight)
quantize_params['axis'][name] = quantized_weight.q_per_channel_axis()
quantize_params['dtype'][name] = quantized_weight.dtype
# attach data to sparsifier
data_sparsifier.add_data(name=name.replace('.', '_'), data=quantize_params['dequant_weights'][name])
data_sparsifier.step()
data_sparsifier.squash_mask()
for name, _ in embedding_modules:
quantized_emb = fqn_to_module(model, name)
assert quantized_emb is not None # satisfy mypy
requantized_vector = torch.quantize_per_channel(quantize_params['dequant_weights'][name],
scales=quantize_params['scales'][name],
zero_points=quantize_params['zero_points'][name],
dtype=quantize_params['dtype'][name],
axis=quantize_params['axis'][name])
quantized_emb.set_weight(requantized_vector) # type: ignore[operator]
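
# --- Editor's note (not part of the original file): a hedged usage sketch. The toy model
# and sparsifier settings are illustrative; a real use would pass a DLRM-style model.
if __name__ == "__main__":
    from torch.ao.sparsity._experimental.data_sparsifier import DataNormSparsifier

    model = nn.Sequential(nn.Embedding(100, 16))
    post_training_sparse_quantize(
        model,
        data_sparsifier_class=DataNormSparsifier,
        sparsify_first=True,
        sparsity_level=0.8,
        sparse_block_shape=(1, 4),
        zeros_per_block=4,
    )
    print(type(model[0]))  # expected: a quantized Embedding holding sparsified weights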
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/quantization_utils.py
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/lightning/__init__.py
|
|
from collections import defaultdict
from copy import deepcopy
import torch
from typing import Any, Optional, Dict
import pytorch_lightning as pl # type: ignore[import]
from ._data_sparstity_utils import (
_attach_model_to_data_sparsifier,
_log_sparsified_level,
_get_valid_name
)
class PostTrainingDataSparsity(pl.callbacks.Callback):
"""Lightning callback that enables post-training sparsity.
This callback aims to sparsify the model inside lightning module after training.
**Note that the model is copied and then sparsified, so the existing model is not modified**
The sparsified model can be used for comparison and can be accessed using
<callback_obj>.sparsified
Args:
data_sparsifier_class (some implemented class of BaseDataSparsifier)
The data sparsifier object of this class is created when the
training starts.
Note: Objects should not be passed in here as they are created
once the training completes.
data_sparsifier_args (Dict)
Dictionary of args to be passed to the data sparsifier.
Note: data_list arg should be ignored
Hooks implemented:
on_fit_end()
1. copies the model and attaches it to the sparsifier
            2. sparsifier step() is called
            3. the mask is squashed
"""
def __init__(self, data_sparsifier_class, data_sparsifier_args):
super().__init__()
self.data_sparsifier_class = data_sparsifier_class
self.data_sparsifier_args = data_sparsifier_args
self.data_sparsifier: Any = None
self.sparsified: Optional[torch.nn.Module] = None
def on_fit_end(self, trainer, pl_module) -> None:
self.sparsified = deepcopy(pl_module.model).eval()
self.data_sparsifier = self.data_sparsifier_class(**self.data_sparsifier_args)
_attach_model_to_data_sparsifier(self.sparsified, self.data_sparsifier)
self.data_sparsifier.step()
self.data_sparsifier.squash_mask() # currently squashes params for all mask
_log_sparsified_level(self.sparsified, self.data_sparsifier)
class TrainingAwareDataSparsity(pl.callbacks.Callback):
"""Lightning callback that enables in-training sparsity.
This callback aims to sparsify the model inside lightning module during training.
**Note that the model is copied and then sparsified, so the existing model is not modified**
The sparsified model can be used for comparison and can be accessed using
<callback_obj>.sparsified
Args:
data_sparsifier_class (some implemented class of BaseDataSparsifier)
The data sparsifier object of this class is created when the
training starts.
Note: Objects should not be passed in here as they are created
when the training starts.
data_sparsifier_args (Dict)
Dictionary of args to be passed to the data sparsifier.
Note: data_list arg should be ignored
data_scheduler_class (some implemented class of BaseDataScheduler)
The data scheduler of this class is created when the training starts
Note: Objects should not be passed in here as they are created
when the training starts.
data_scheduler_args(Dict)
Dictionary of args to be passed to the data scheduler.
**Note: data_sparsifier arg should be ignored as the recipe
            creates and passes the sparsifier object into the class**
Hooks implemented:
on_train_start()
Data sparsifier and scheduler objects are created.
Pytorch model attached to the sparsifier
on_train_epoch_start()
Loads the state_dict of the data sparsifier
on_train_epoch_end()
1. Copies the model and attaches it to the sparsifier
2. sparsifier step() and scheduler step()
3. Dump state_dict of the current sparsifier
on_train_end()
squash mask
"""
def __init__(self, data_sparsifier_class, data_sparsifier_args,
data_scheduler_class, data_scheduler_args):
super().__init__()
# data sparsifier objects
self.data_sparsifier_class = data_sparsifier_class
self.data_sparsifier_args = data_sparsifier_args
# scheduler objects
self.data_scheduler_class = data_scheduler_class
self.data_scheduler_args = data_scheduler_args
# fields
self.data_sparsifier: Any = None
self.data_scheduler: Any = None
self.sparsified: Optional[torch.nn.Module] = None
self.data_sparsifier_state_dict: Any = None
def on_train_start(self, trainer, pl_module) -> None:
# create sparsifier
self.data_sparsifier = self.data_sparsifier_class(**self.data_sparsifier_args)
self.sparsified = deepcopy(pl_module.model)
_attach_model_to_data_sparsifier(self.sparsified, self.data_sparsifier) # just to populate the base_sl in the scheduler
# create scheduler
args = deepcopy(self.data_scheduler_args)
args['data_sparsifier'] = self.data_sparsifier
self.data_scheduler = self.data_scheduler_class(**args)
def on_train_epoch_start(self, trainer, pl_module):
if self.data_sparsifier_state_dict is None:
return # probably first epoch
# load the existing config for each data
self.data_sparsifier.load_state_dict(self.data_sparsifier_state_dict)
def __create_config_based_on_state(self, pl_module):
config: Dict = defaultdict()
if self.data_sparsifier_state_dict is None:
return config
for name, _ in pl_module.model.named_parameters():
valid_name = _get_valid_name(name)
config[valid_name] = self.data_sparsifier.data_groups[valid_name]
return config
def on_train_epoch_end(self, trainer, pl_module):
self.sparsified = deepcopy(pl_module.model)
config = self.__create_config_based_on_state(pl_module)
# attach model to the data sparsifier
_attach_model_to_data_sparsifier(self.sparsified, self.data_sparsifier, config=config)
self.data_sparsifier.step()
self.data_scheduler.step()
self.data_sparsifier_state_dict = self.data_sparsifier.state_dict()
def on_train_end(self, trainer, pl_module):
self.data_sparsifier.squash_mask()
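
# --- Editor's note (not part of the original file): a hedged sketch showing the post-training
# callback driven in isolation (calling the hook directly, as this package's tests do). The
# DummyModule below is an illustrative placeholder; in real training the callback would be
# passed to `pl.Trainer(callbacks=[...])` instead.
if __name__ == "__main__":
    import torch.nn as nn
    from torch.ao.sparsity._experimental.data_sparsifier import DataNormSparsifier

    class DummyModule(pl.LightningModule):
        def __init__(self):
            super().__init__()
            self.model = nn.Sequential(nn.Linear(16, 16, bias=False))

    callback = PostTrainingDataSparsity(
        data_sparsifier_class=DataNormSparsifier,
        data_sparsifier_args={"sparsity_level": 0.8,
                              "sparse_block_shape": (1, 4),
                              "zeros_per_block": 4},
    )
    callback.on_fit_end(trainer=None, pl_module=DummyModule())
    # `callback.sparsified` holds the sparsified copy; the original module is untouched.
    print(callback.sparsified)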
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/lightning/callbacks/data_sparsity.py
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/lightning/callbacks/__init__.py
|
|
import logging
from torch.ao.sparsity._experimental.data_sparsifier.base_data_sparsifier import SUPPORTED_TYPES
logger: logging.Logger = logging.getLogger(__name__)
def _attach_model_to_data_sparsifier(module, data_sparsifier, config=None):
"""Attaches a data sparsifier to all the layers of the module.
    Essentially, this loops over all the weight parameters in the module and
    attaches them to the data sparsifier.
Note::
The '.' in the layer names are replaced with '_' (refer to _get_valid_name() below)
        before attaching to the sparsifier. This is because the data
sparsifier uses a dummy model inside to store the weight parameters.
"""
if config is None:
config = {}
for name, parameter in module.named_parameters():
if type(parameter) in SUPPORTED_TYPES:
valid_name = _get_valid_name(name)
# will be defaulted to default configs
data_sparsifier.add_data(name=valid_name, data=parameter, **config.get(valid_name, {}))
def _get_valid_name(name):
return name.replace('.', '_') # . is not allowed as a name
def _log_sparsified_level(model, data_sparsifier) -> None:
# Show the level of sparsity AFTER step:
for name, parameter in model.named_parameters():
if not (type(parameter) in SUPPORTED_TYPES):
continue
valid_name = _get_valid_name(name)
mask = data_sparsifier.get_mask(name=valid_name)
sparsity_level = 1.0 - mask.float().mean()
logger.info(
f"Sparsity in layer {name} = {sparsity_level: .2%}"
)
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/lightning/callbacks/_data_sparstity_utils.py
|
from torch.ao.sparsity._experimental.data_sparsifier.data_norm_sparsifier import DataNormSparsifier
from torch.ao.sparsity._experimental.data_scheduler.base_data_scheduler import BaseDataScheduler
import torch
import torch.nn as nn
from typing import List
from torch.ao.sparsity._experimental.data_sparsifier.lightning.callbacks.data_sparsity import (
PostTrainingDataSparsity,
TrainingAwareDataSparsity
)
from torch.ao.sparsity._experimental.data_sparsifier.lightning.callbacks._data_sparstity_utils import _get_valid_name
from torch.ao.sparsity._experimental.data_sparsifier.base_data_sparsifier import SUPPORTED_TYPES
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.common_utils import run_tests
import importlib
import unittest
import warnings
import math
from torch.nn.utils.parametrize import is_parametrized
class DummyModel(nn.Module):
def __init__(self, iC: int, oC: List[int]):
super().__init__()
self.linears = nn.Sequential()
i = iC
for idx, c in enumerate(oC):
self.linears.append(nn.Linear(i, c, bias=False))
if idx < len(oC) - 1:
self.linears.append(nn.ReLU())
i = c
def _make_lightning_module(iC: int, oC: List[int]):
import pytorch_lightning as pl # type: ignore[import]
class DummyLightningModule(pl.LightningModule):
def __init__(self, ic: int, oC: List[int]):
super().__init__()
self.model = DummyModel(iC, oC)
def forward(self):
pass
return DummyLightningModule(iC, oC)
class StepSLScheduler(BaseDataScheduler):
"""The sparsity param of each data group is multiplied by gamma every step_size epochs.
"""
def __init__(self, data_sparsifier, schedule_param='sparsity_level',
step_size=1, gamma=2, last_epoch=-1, verbose=False):
self.gamma = gamma
self.step_size = step_size
super().__init__(data_sparsifier, schedule_param, last_epoch, verbose)
def get_schedule_param(self):
if not self._get_sp_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
data_groups = self.data_sparsifier.data_groups
if (self.last_epoch == 0) or (self.last_epoch % self.step_size != 0):
return {name: config[self.schedule_param] for name, config in data_groups.items()}
return {name: config[self.schedule_param] * self.gamma for name, config in data_groups.items()}
class TestPostTrainingCallback(TestCase):
def _check_on_fit_end(self, pl_module, callback, sparsifier_args):
"""Makes sure that each component of is working as expected while calling the
post-training callback.
Specifically, check the following -
1. sparsifier config is the same as input config
2. data sparsifier is correctly attached to the model
3. sparsity is achieved after .step()
4. non-sparsified values are the same as original values
"""
callback.on_fit_end(42, pl_module) # 42 is a dummy value
# check sparsifier config
for key, value in sparsifier_args.items():
assert callback.data_sparsifier.defaults[key] == value
# assert that the model is correctly attached to the sparsifier
for name, param in pl_module.model.named_parameters():
valid_name = _get_valid_name(name)
if type(param) not in SUPPORTED_TYPES:
assert valid_name not in callback.data_sparsifier.state
assert valid_name not in callback.data_sparsifier.data_groups
continue
assert valid_name in callback.data_sparsifier.data_groups
assert valid_name in callback.data_sparsifier.state
mask = callback.data_sparsifier.get_mask(name=valid_name)
# assert that some level of sparsity is achieved
assert (1.0 - mask.float().mean()) > 0.0
# make sure that non-zero values in data after squash mask are equal to original values
sparsified_data = callback.data_sparsifier.get_data(name=valid_name, return_original=False)
assert torch.all(sparsified_data[sparsified_data != 0] == param[sparsified_data != 0])
@unittest.skipIf(not importlib.util.find_spec("pytorch_lightning"), "No pytorch_lightning")
def test_post_training_callback(self):
sparsifier_args = {
'sparsity_level': 0.5,
'sparse_block_shape': (1, 4),
'zeros_per_block': 4
}
callback = PostTrainingDataSparsity(DataNormSparsifier, sparsifier_args)
pl_module = _make_lightning_module(100, [128, 256, 16])
self._check_on_fit_end(pl_module, callback, sparsifier_args)
class TestTrainingAwareCallback(TestCase):
"""Class to test in-training version of lightning callback
Simulates model training and makes sure that each hook is doing what is expected
"""
def _check_on_train_start(self, pl_module, callback, sparsifier_args, scheduler_args):
"""Makes sure that the data_sparsifier and data_scheduler objects are being created
correctly.
Basically, confirms that the input args and sparsifier/scheduler args are in-line.
"""
callback.on_train_start(42, pl_module) # 42 is a dummy value
# sparsifier and scheduler instantiated
assert callback.data_scheduler is not None and callback.data_sparsifier is not None
# data sparsifier args are correct
for key, value in sparsifier_args.items():
            assert callback.data_sparsifier.defaults[key] == value
# data scheduler args are correct
for key, value in scheduler_args.items():
assert getattr(callback.data_scheduler, key) == value
def _simulate_update_param_model(self, pl_module):
"""This function might not be needed as the model is being copied
during train_epoch_end() but good to have if things change in the future
"""
for _, param in pl_module.model.named_parameters():
param.data = param + 1
def _check_on_train_epoch_start(self, pl_module, callback):
"""Basically ensures that the sparsifier's state is correctly being restored.
The state_dict() comparison is needed. Consider the flow -
**Epoch: 1**
1. on_train_epoch_start(): Nothing happens (for now)
2. on_train_epoch_end():
a) the model is copied into the data_sparsifier
b) .step() is called
c) internally, the state of each layer of the model inside
data sparsifier changes
**Epoch: 2**
1. on_train_epoch_start(): Assume nothing happens
2. on_train_epoch_end():
a) the model is copied into the data_sparsifier.
                But wait! You need the config to attach each layer
of the module to the sparsifier. If config is None,
the data_sparsifier uses the default config which we
do not want as the config of each layer changes after
.step()
        Hence, we need to dump and restore the state_dict() every time because we're
copying the model after each epoch.
Hence, it is essential to make sure that the sparsifier's state_dict() is being
correctly dumped and restored.
"""
# check if each component of state dict is being loaded correctly
callback.on_train_epoch_start(42, pl_module)
if callback.data_sparsifier_state_dict is None:
return
data_sparsifier_state_dict = callback.data_sparsifier.state_dict()
# compare container objects
container_obj1 = data_sparsifier_state_dict['_container']
container_obj2 = callback.data_sparsifier_state_dict['_container']
assert len(container_obj1) == len(container_obj2)
for key, value in container_obj2.items():
assert key in container_obj1
assert torch.all(value == container_obj1[key])
# compare state objects
state_obj1 = data_sparsifier_state_dict['state']
state_obj2 = callback.data_sparsifier_state_dict['state']
assert len(state_obj1) == len(state_obj2)
for key, value in state_obj2.items():
assert key in state_obj1
assert 'mask' in value and 'mask' in state_obj1[key]
assert torch.all(value['mask'] == state_obj1[key]['mask'])
# compare data_groups dict
data_grp1 = data_sparsifier_state_dict['data_groups']
data_grp2 = callback.data_sparsifier_state_dict['data_groups']
assert len(data_grp1) == len(data_grp2)
for key, value in data_grp2.items():
assert key in data_grp1
assert value == data_grp1[key]
def _check_on_train_epoch_end(self, pl_module, callback):
"""Checks the following -
1. sparsity is correctly being achieved after .step()
2. scheduler and data_sparsifier sparsity levels are in-line
"""
callback.on_train_epoch_end(42, pl_module)
data_scheduler = callback.data_scheduler
base_sl = data_scheduler.base_param
for name, _ in pl_module.model.named_parameters():
valid_name = _get_valid_name(name)
mask = callback.data_sparsifier.get_mask(name=valid_name)
# check sparsity levels
assert (1.0 - mask.float().mean()) > 0 # some sparsity level achieved
last_sl = data_scheduler.get_last_param()
last_epoch = data_scheduler.last_epoch
# check sparsity levels of scheduler
log_last_sl = math.log(last_sl[valid_name])
log_actual_sl = math.log(base_sl[valid_name] * (data_scheduler.gamma ** last_epoch))
assert log_last_sl == log_actual_sl
def _check_on_train_end(self, pl_module, callback):
"""Confirms that the mask is squashed after the training ends
This is achieved by making sure that each parameter in the internal container
are not parametrized.
"""
callback.on_train_end(42, pl_module)
# check that the masks have been squashed
for name, _ in pl_module.model.named_parameters():
valid_name = _get_valid_name(name)
            assert not is_parametrized(callback.data_sparsifier._container, valid_name)
@unittest.skipIf(not importlib.util.find_spec("pytorch_lightning"), "No pytorch_lightning")
def test_train_aware_callback(self):
sparsifier_args = {
'sparsity_level': 0.5,
'sparse_block_shape': (1, 4),
'zeros_per_block': 4
}
scheduler_args = {
'gamma': 2,
'step_size': 1
}
callback = TrainingAwareDataSparsity(
data_sparsifier_class=DataNormSparsifier,
data_sparsifier_args=sparsifier_args,
data_scheduler_class=StepSLScheduler,
data_scheduler_args=scheduler_args
)
pl_module = _make_lightning_module(100, [128, 256, 16])
# simulate the training process and check all steps
self._check_on_train_start(pl_module, callback, sparsifier_args, scheduler_args)
num_epochs = 5
for _ in range(0, num_epochs):
self._check_on_train_epoch_start(pl_module, callback)
self._simulate_update_param_model(pl_module)
self._check_on_train_epoch_end(pl_module, callback)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/lightning/tests/test_callbacks.py
|
from typing import Dict, List
import torch
import time
from torch.ao.sparsity._experimental.data_sparsifier import DataNormSparsifier
import os
from dlrm_utils import get_dlrm_model, get_valid_name # type: ignore[import]
import copy
import zipfile
from zipfile import ZipFile
import pandas as pd # type: ignore[import]
import argparse
def create_attach_sparsifier(model, **sparse_config):
"""Create a DataNormSparsifier and the attach it to the model embedding layers
Args:
model (nn.Module)
            model whose embedding layers need to be attached to the sparsifier
sparse_config (Dict)
Config to the DataNormSparsifier. Should contain the following keys:
- sparse_block_shape
- norm
- sparsity_level
"""
data_norm_sparsifier = DataNormSparsifier(**sparse_config)
for name, parameter in model.named_parameters():
if 'emb_l' in name:
valid_name = get_valid_name(name)
data_norm_sparsifier.add_data(name=valid_name, data=parameter)
return data_norm_sparsifier
def save_model_states(state_dict, sparsified_model_dump_path, save_file_name, sparse_block_shape, norm, zip=True):
"""Dumps the state_dict() of the model.
Args:
state_dict (Dict)
The state_dict() as dumped by dlrm_s_pytorch.py. Only the model state will be extracted
from this dictionary. This corresponds to the 'state_dict' key in the state_dict dictionary.
>>> model_state = state_dict['state_dict']
save_file_name (str)
The filename (not path) when saving the model state dictionary
sparse_block_shape (Tuple)
The block shape corresponding to the data norm sparsifier. **Used for creating save directory**
norm (str)
type of norm (L1, L2) for the datanorm sparsifier. **Used for creating save directory**
zip (bool)
if True, the file is zip-compressed.
"""
folder_name = os.path.join(sparsified_model_dump_path, str(norm))
# save model only states
folder_str = f"config_{sparse_block_shape}"
model_state = state_dict['state_dict']
model_state_path = os.path.join(folder_name, folder_str, save_file_name)
if not os.path.exists(os.path.dirname(model_state_path)):
os.makedirs(os.path.dirname(model_state_path))
torch.save(model_state, model_state_path)
if zip:
zip_path = model_state_path.replace('.ckpt', '.zip')
with ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
zip_file.write(model_state_path, save_file_name)
os.remove(model_state_path) # store it as zip, remove uncompressed
model_state_path = zip_path
model_state_path = os.path.abspath(model_state_path)
file_size = os.path.getsize(model_state_path)
file_size = file_size >> 20 # size in mb
return model_state_path, file_size
def sparsify_model(path_to_model, sparsified_model_dump_path):
"""Sparsifies the embedding layers of the dlrm model for different sparsity levels, norms and block shapes
using the DataNormSparsifier.
The function tracks the step time of the sparsifier and the size of the compressed checkpoint and collates
it into a csv.
Note::
This function dumps a csv sparse_model_metadata.csv in the current directory.
Args:
path_to_model (str)
path to the trained criteo model ckpt file
sparsified_model_dump_path (str)
path under which the sparsified model checkpoints and metadata are dumped
Note: the sparsity levels, norms and sparse block shapes to sweep over are hardcoded inside this function.
"""
sparsity_levels = [sl / 10 for sl in range(0, 10)]
sparsity_levels += [0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 1.0]
norms = ["L1", "L2"]
sparse_block_shapes = [(1, 1), (1, 4)]
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print("Running for sparsity levels - ", sparsity_levels)
print("Running for sparse block shapes - ", sparse_block_shapes)
print("Running for norms - ", norms)
orig_model = get_dlrm_model()
saved_state = torch.load(path_to_model, map_location=device)
orig_model.load_state_dict(saved_state['state_dict'])
orig_model = orig_model.to(device)
step_time_dict = {}
stat_dict: Dict[str, List] = {'norm': [], 'sparse_block_shape': [], 'sparsity_level': [],
'step_time_sec': [], 'zip_file_size': [], 'path': []}
for norm in norms:
for sbs in sparse_block_shapes:
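# for 1x1 blocks the L1 and L2 norms rank elements identically, so the
# (L2, (1, 1)) configuration would duplicate the (L1, (1, 1)) run and is skipped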
if norm == "L2" and sbs == (1, 1):
continue
for sl in sparsity_levels:
model = copy.deepcopy(orig_model)
sparsifier = create_attach_sparsifier(model, sparse_block_shape=sbs, norm=norm, sparsity_level=sl)
t1 = time.time()
sparsifier.step()
t2 = time.time()
step_time = t2 - t1
norm_sl = f"{norm}_{sbs}_{sl}"
print(f"Step Time for {norm_sl}=: {step_time} s")
step_time_dict[norm_sl] = step_time
sparsifier.squash_mask()
saved_state['state_dict'] = model.state_dict()
file_name = f'criteo_model_norm={norm}_sl={sl}.ckpt'
state_path, file_size = save_model_states(saved_state, sparsified_model_dump_path, file_name, sbs, norm=norm)
stat_dict['norm'].append(norm)
stat_dict['sparse_block_shape'].append(sbs)
stat_dict['sparsity_level'].append(sl)
stat_dict['step_time_sec'].append(step_time)
stat_dict['zip_file_size'].append(file_size)
stat_dict['path'].append(state_path)
df = pd.DataFrame(stat_dict)
filename = 'sparse_model_metadata.csv'
df.to_csv(filename, index=False)
print(f"Saved sparsified metadata file in {filename}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str)
parser.add_argument('--sparsified_model_dump_path', type=str)
args = parser.parse_args()
sparsify_model(args.model_path, args.sparsified_model_dump_path)
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/benchmarks/evaluate_disk_savings.py
|
from typing import Dict, List
import torch
from dlrm_s_pytorch import unpack_batch # type: ignore[import]
import numpy as np # type: ignore[import]
import time
from dlrm_utils import make_test_data_loader, fetch_model, dlrm_wrap # type: ignore[import]
import pandas as pd # type: ignore[import]
import argparse
def run_forward(model, **batch):
"""The purpose of this function is to time the forward run of the model.
The model forward happens a 100 times and each pass is timed. The average
of this 100 runs is returned as avg_time.
"""
time_list = []
X, lS_o, lS_i = batch['X'], batch['lS_o'], batch['lS_i']
for _ in range(100):
start = time.time()
with torch.no_grad():
model(X, lS_o, lS_i)
end = time.time()
time_taken = end - start
time_list.append(time_taken)
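# average over all timed runs except the first, which serves as a warm-up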
avg_time = np.mean(time_list[1:])
return avg_time
def make_sample_test_batch(raw_data_path, processed_data_path, device):
"""Create the test_data_loader and sample a batch from it. This batch will be used
to measure the forward pass of the model throughout this experiment.
"""
test_data_loader = make_test_data_loader(raw_data_path, processed_data_path)
test_iter = iter(test_data_loader)
test_batch = next(test_iter)
X_test, lS_o_test, lS_i_test, _, _, _ = unpack_batch(test_batch)
X, lS_o, lS_i = dlrm_wrap(X_test, lS_o_test, lS_i_test, device)
batch = {
'X': X,
'lS_o': lS_o,
'lS_i': lS_i
}
return batch
def measure_forward_pass(sparse_model_metadata, device, sparse_dlrm, **batch):
"""Measures and tracks the forward pass of the model for all the sparsity levels, block shapes and norms
available in sparse_model_metadata file.
If sparse_dlrm=True, then the SparseDLRM model is loaded, otherwise the standard one is.
"""
time_taken_dict: Dict[str, List] = {
"norm": [],
"sparse_block_shape": [],
"sparsity_level": [],
"time_taken": [],
}
metadata = pd.read_csv(sparse_model_metadata)
for _, row in metadata.iterrows():
norm, sbs, sl = row['norm'], row['sparse_block_shape'], row['sparsity_level']
model_path = row['path']
model = fetch_model(model_path, device, sparse_dlrm=sparse_dlrm)
time_taken = run_forward(model, **batch)
out_str = f"{norm}_{sbs}_{sl}={time_taken}"
print(out_str)
time_taken_dict["norm"].append(norm)
time_taken_dict["sparse_block_shape"].append(sbs)
time_taken_dict["sparsity_level"].append(sl)
time_taken_dict["time_taken"].append(time_taken)
time_df = pd.DataFrame(time_taken_dict)
if sparse_dlrm:
time_df['dlrm_type'] = 'with_torch_sparse'
else:
time_df['dlrm_type'] = 'without_torch_sparse'
return time_df
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--raw_data_file', type=str)
parser.add_argument('--processed_data_file', type=str)
parser.add_argument('--sparse_model_metadata', type=str)
args = parser.parse_args()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(device)
batch = make_sample_test_batch(args.raw_data_file, args.processed_data_file, device)
print("Forward Time for Sparse DLRM")
sparse_dlrm_time_df = measure_forward_pass(args.sparse_model_metadata, device, sparse_dlrm=True, **batch)
print(sparse_dlrm_time_df)
print("Forward Time for Normal DLRM")
norm_dlrm_time_df = measure_forward_pass(args.sparse_model_metadata, device, sparse_dlrm=False, **batch)
print(norm_dlrm_time_df)
forward_time_all = pd.concat([sparse_dlrm_time_df, norm_dlrm_time_df])
forward_time_all.to_csv('dlrm_forward_time_info.csv', index=False)
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/benchmarks/evaluate_forward_time.py
|
from typing import Dict, List
import torch
from dlrm_s_pytorch import unpack_batch # type: ignore[import]
import numpy as np # type: ignore[import]
import sklearn.metrics  # type: ignore[import]
from dlrm_utils import make_test_data_loader, dlrm_wrap, fetch_model
import pandas as pd # type: ignore[import]
import argparse
def inference_and_evaluation(dlrm, test_dataloader, device):
"""Perform inference and evaluation on the test dataset.
The function returns the dictionary that contains evaluation metrics such as accuracy, f1, auc,
precision, recall.
Note: This function is a rewritten version of ```inference()``` present in dlrm_s_pytorch.py
Args:
dlrm (nn.Module)
dlrm model object
test_data_loader (torch dataloader):
dataloader for the test dataset
device (torch.device)
device on which the inference happens
"""
nbatches = len(test_dataloader)
scores = []
targets = []
for i, testBatch in enumerate(test_dataloader):
# early exit if nbatches was set by the user and was exceeded
if nbatches > 0 and i >= nbatches:
break
X_test, lS_o_test, lS_i_test, T_test, _, _ = unpack_batch(
testBatch
)
# forward pass
X_test, lS_o_test, lS_i_test = dlrm_wrap(X_test, lS_o_test, lS_i_test, device, ndevices=1)
Z_test = dlrm(X_test, lS_o_test, lS_i_test)
S_test = Z_test.detach().cpu().numpy() # numpy array
T_test = T_test.detach().cpu().numpy() # numpy array
scores.append(S_test)
targets.append(T_test)
scores = np.concatenate(scores, axis=0)
targets = np.concatenate(targets, axis=0)
metrics = {
"recall": lambda y_true, y_score: sklearn.metrics.recall_score(
y_true=y_true, y_pred=np.round(y_score)
),
"precision": lambda y_true, y_score: sklearn.metrics.precision_score(
y_true=y_true, y_pred=np.round(y_score)
),
"f1": lambda y_true, y_score: sklearn.metrics.f1_score(
y_true=y_true, y_pred=np.round(y_score)
),
"ap": sklearn.metrics.average_precision_score,
"roc_auc": sklearn.metrics.roc_auc_score,
"accuracy": lambda y_true, y_score: sklearn.metrics.accuracy_score(
y_true=y_true, y_pred=np.round(y_score)
),
"log_loss": lambda y_true, y_score: sklearn.metrics.log_loss(
y_true=y_true, y_pred=y_score
)
}
all_metrics = {}
for metric_name, metric_function in metrics.items():
all_metrics[metric_name] = round(metric_function(targets, scores), 3)
return all_metrics
def evaluate_metrics(test_dataloader, sparse_model_metadata):
"""Evaluates the metrics the sparsified metrics for the dlrm model on various sparsity levels,
block shapes and norms. This function evaluates the model on the test dataset and dumps
evaluation metrics in a csv file [model_performance.csv]
"""
metadata = pd.read_csv(sparse_model_metadata)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
metrics_dict: Dict[str, List] = {
"norm": [],
"sparse_block_shape": [],
"sparsity_level": [],
"precision": [],
"recall": [],
"f1": [],
"roc_auc": [],
"accuracy": [],
"log_loss": []
}
for _, row in metadata.iterrows():
norm, sbs, sl = row['norm'], row['sparse_block_shape'], row['sparsity_level']
model_path = row['path']
model = fetch_model(model_path, device)
model_metrics = inference_and_evaluation(model, test_dataloader, device)
key = f"{norm}_{sbs}_{sl}"
print(key, "=", model_metrics)
metrics_dict['norm'].append(norm)
metrics_dict['sparse_block_shape'].append(sbs)
metrics_dict['sparsity_level'].append(sl)
for key, value in model_metrics.items():
if key in metrics_dict:
metrics_dict[key].append(value)
sparse_model_metrics = pd.DataFrame(metrics_dict)
print(sparse_model_metrics)
filename = 'sparse_model_metrics.csv'
sparse_model_metrics.to_csv(filename, index=False)
print(f"Model metrics file saved to {filename}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--raw_data_file', type=str)
parser.add_argument('--processed_data_file', type=str)
parser.add_argument('--sparse_model_metadata', type=str)
args = parser.parse_args()
# Fetch test data loader
test_dataloader = make_test_data_loader(args.raw_data_file, args.processed_data_file)
# Evaluate metrics
evaluate_metrics(test_dataloader, args.sparse_model_metadata)
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/benchmarks/evaluate_model_metrics.py
|
import torch
from dlrm_s_pytorch import DLRM_Net # type: ignore[import]
import numpy as np # type: ignore[import]
from dlrm_data_pytorch import CriteoDataset, collate_wrapper_criteo_offset # type: ignore[import]
import zipfile
import os
class SparseDLRM(DLRM_Net):
"""The SparseDLRM model is a wrapper around the DLRM_Net model that tries
to use torch.sparse tensors for the features obtained after the ```interact_features()```
call. The idea is to do a simple torch.mm() with the weight matrix of the first linear
layer of the top layer.
"""
def __init__(self, **args):
super().__init__(**args)
def forward(self, dense_x, lS_o, lS_i):
x = self.apply_mlp(dense_x, self.bot_l) # dense features
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l) # apply embedding bag
z = self.interact_features(x, ly)
z = z.to_sparse_coo()
z = torch.mm(z, self.top_l[0].weight.T).add(self.top_l[0].bias)
for layer in self.top_l[1:]:
z = layer(z)
return z
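# Illustrative sketch (not part of the original utilities): a standalone version of the
# sparse matmul pattern used in SparseDLRM.forward above; the tensor shapes are made up.
def _sparse_mm_sketch():
    features = torch.randn(4, 8)            # stand-in for interact_features() output
    weight = torch.randn(16, 8)             # stand-in for top_l[0].weight
    bias = torch.randn(16)                  # stand-in for top_l[0].bias
    z = features.to_sparse_coo()            # convert the features to a sparse COO tensor
    return torch.mm(z, weight.T).add(bias)  # sparse x dense matmul, then add the bias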
def get_valid_name(name):
"""Replaces '.' with '_' as names with '.' are invalid in data sparsifier
"""
return name.replace('.', '_')
def get_dlrm_model(sparse_dlrm=False):
"""Obtain dlrm model. The configs specified are based on the script in
bench/dlrm_s_criteo_kaggle.sh. The same config is used to train the model
for benchmarking on data sparsifier.
"""
dlrm_model_config = {
'm_spa': 16,
'ln_emb': np.array([1460, 583, 10131227, 2202608, 305, 24,
12517, 633, 3, 93145, 5683, 8351593,
3194, 27, 14992, 5461306, 10, 5652,
2173, 4, 7046547, 18, 15, 286181,
105, 142572], dtype=np.int32),
'ln_bot': np.array([13, 512, 256, 64, 16]),
'ln_top': np.array([367, 512, 256, 1]),
'arch_interaction_op': 'dot',
'arch_interaction_itself': False,
'sigmoid_bot': -1,
'sigmoid_top': 2,
'sync_dense_params': True,
'loss_threshold': 0.0,
'ndevices': 1,
'qr_flag': False,
'qr_operation': 'mult',
'qr_collisions': 4,
'qr_threshold': 200,
'md_flag': False,
'md_threshold': 200,
'weighted_pooling': None,
'loss_function': 'bce'
}
if sparse_dlrm:
dlrm_model = SparseDLRM(**dlrm_model_config)
else:
dlrm_model = DLRM_Net(**dlrm_model_config)
return dlrm_model
def dlrm_wrap(X, lS_o, lS_i, device, ndevices=1):
"""Rewritten simpler version of ```dlrm_wrap()``` found in dlrm_s_pytorch.py.
This function simply moves the input tensors to the device, without running the forward pass
"""
if ndevices == 1:
lS_i = (
[S_i.to(device) for S_i in lS_i]
if isinstance(lS_i, list)
else lS_i.to(device)
)
lS_o = (
[S_o.to(device) for S_o in lS_o]
if isinstance(lS_o, list)
else lS_o.to(device)
)
return X.to(device), lS_o, lS_i
def make_test_data_loader(raw_data_file_path, processed_data_file):
"""Function to create dataset and dataloaders for the test dataset.
Rewritten simpler version of ```make_criteo_and_loaders()``` from the dlrm_data_pytorch.py
that makes the test dataset and dataloaders only for the ***kaggle criteo dataset***
"""
test_data = CriteoDataset(
"kaggle",
-1,
0.0,
"total",
"test",
raw_data_file_path,
processed_data_file,
False,
False,
)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=16384,
shuffle=False,
num_workers=7,
collate_fn=collate_wrapper_criteo_offset,
pin_memory=False,
drop_last=False,
)
return test_loader
def fetch_model(model_path, device, sparse_dlrm=False):
"""This function unzips the zipped model checkpoint (if zipped) and returns a
model object
Args:
model_path (str)
path pointing to the zipped/raw model checkpoint file that was dumped in evaluate disk savings
device (torch.device)
device to which model needs to be loaded to
"""
if zipfile.is_zipfile(model_path):
with zipfile.ZipFile(model_path, 'r', zipfile.ZIP_DEFLATED) as zip_ref:
zip_ref.extractall(os.path.dirname(model_path))
unzip_path = model_path.replace('.zip', '.ckpt')
else:
unzip_path = model_path
model = get_dlrm_model(sparse_dlrm=sparse_dlrm)
model.load_state_dict(torch.load(unzip_path, map_location=device))
model = model.to(device)
model.eval()
# If there was a zip file, clean up the unzipped files
if zipfile.is_zipfile(model_path):
os.remove(unzip_path)
return model
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_sparsifier/benchmarks/dlrm_utils.py
|
from .base_pruner import BasePruner
from .parametrization import (
ActivationReconstruction,
BiasHook,
PruningParametrization,
ZeroesParametrization,
)
__all__ = [
"ActivationReconstruction",
"BasePruner",
"BiasHook",
"PruningParametrization",
"ZeroesParametrization",
]
|
pytorch-master
|
torch/ao/sparsity/_experimental/pruner/__init__.py
|
import copy
import warnings
import abc
import torch
from torch import nn
from torch.nn.utils import parametrize
from torch.nn.modules.container import ModuleDict, ModuleList
from .parametrization import PruningParametrization, ZeroesParametrization, ActivationReconstruction, BiasHook
from torch.ao.sparsity import BaseSparsifier, module_to_fqn, fqn_to_module
from torch.ao.sparsity.sparsifier.utils import get_arg_info_from_tensor_fqn
__all__ = ["BasePruner"]
SUPPORTED_MODULES = { # added to config if None given
nn.Linear,
nn.Conv2d,
nn.BatchNorm2d, # will need manual update to match conv2d
}
NEEDS_ZEROS = { # these layers should have pruned indices zero-ed, not removed
nn.BatchNorm2d
}
class BasePruner(BaseSparsifier):
r"""Base class for all pruners.
Abstract methods that need to be implemented:
- update_mask: Function to compute a new mask for all keys in the
`groups` attribute.
Args:
- defaults [dict]: default configurations will be attached to the
configuration. Only the keys that don't exist in the `config` will
be updated.
- also_prune_bias [bool]: whether to prune bias in addition to weights (to prune full output channel)
or not; default=True.
"""
def __init__(self, defaults, also_prune_bias=True):
super().__init__(defaults)
self.prune_bias = also_prune_bias
def _get_modules_and_tensor_names(self, config, use_path):
modules = []
tensor_names = []
if use_path:
if type(config['module']) is tuple: # (Conv2d, BN)
for module_fqn, tensor_name in zip(config['module_fqn'], config['tensor_name']):
module = fqn_to_module(self.model, module_fqn)
modules.append(module)
tensor_names.append(tensor_name)
else:
module = fqn_to_module(self.model, config['module_fqn'])
modules.append(module)
tensor_name = config['tensor_name']
tensor_names.append(tensor_name)
else:
if type(config['module']) is tuple:
for module, tensor_name in zip(config['module'], config['tensor_name']):
modules.append(module)
tensor_names.append(tensor_name)
else:
module = config['module']
modules.append(module)
tensor_name = config['tensor_name']
tensor_names.append(tensor_name)
return modules, tensor_names
def _prepare(self, use_path=False, *args, **kwargs):
r"""Adds mask parametrization to the layer weight
"""
self.activation_handles = [] # store removable hook handles
self.bias_handles = []
for config in self.groups:
modules, tensor_names = self._get_modules_and_tensor_names(config, use_path)
for module, tensor_name in zip(modules, tensor_names):
if not isinstance(module, tuple(NEEDS_ZEROS)):
# add pruning parametrization and forward hooks
if getattr(module, 'mask', None) is None:
module.register_buffer('mask', torch.tensor(getattr(module, tensor_name).shape[0]))
param = config.get('parametrization', PruningParametrization)
parametrize.register_parametrization(module, tensor_name, param(module.mask), unsafe=True)
assert isinstance(module.parametrizations, ModuleDict) # make mypy happy
assert isinstance(module.parametrizations.weight, ModuleList)
if isinstance(module, tuple(SUPPORTED_MODULES)):
self.activation_handles.append(module.register_forward_hook(
ActivationReconstruction(getattr(module.parametrizations, tensor_name)[0])
))
else:
raise NotImplementedError("This module type is not supported yet.")
else: # needs zeros
if getattr(module, 'mask', None) is None:
module.register_buffer('mask', torch.tensor(getattr(module, tensor_name).shape[0]))
param = config.get('parametrization', ZeroesParametrization)
parametrize.register_parametrization(module, tensor_name, param(module.mask), unsafe=True)
if module.bias is not None:
module.register_parameter('_bias', nn.Parameter(module.bias.detach()))
module.bias = None
self.bias_handles.append(module.register_forward_hook(BiasHook(module.parametrizations.weight[0], self.prune_bias)))
if len(modules) == 2: # (Conv2d, BN)
# should have the same set of pruned outputs
modules[1].parametrizations.weight[0].pruned_outputs = modules[0].parametrizations.weight[0].pruned_outputs
def make_config_from_model(self, model, SUPPORTED_MODULES=SUPPORTED_MODULES, NEEDS_ZEROS=NEEDS_ZEROS):
self.config = []
stack = [model]
while stack:
module = stack.pop()
for name, child in module.named_children():
if type(child) in SUPPORTED_MODULES:
child_fqn = module_to_fqn(model, child)
assert isinstance(child_fqn, str) # for mypy
self.config.append({'tensor_fqn': child_fqn + '.weight'})
else:
if NEEDS_ZEROS is not None and type(child) in NEEDS_ZEROS and hasattr(self, "prune_bias") and self.prune_bias:
# only useful for Pruner
warnings.warn(f"Models with {type(child)} layers have config provided by user.")
stack.append(child)
def prepare(self, model, config):
r"""Prepares a model, by adding the parametrizations and forward post-hooks.
Note::
The model is modified inplace. If you need to preserve the original
model, use copy.deepcopy.
Args:
- model [nn.Module]: model to configure. The model itself is not saved
but used for the state_dict saving / loading.
- config [list]: configuration elements can either be module instances, tuples of
modules (e.g. (Conv2d, BatchNorm2d)), or dict maps. The dicts must have a key 'tensor_fqn' with the
value being the fqn of the tensor to be pruned.
"""
self.model = model # TODO: Need to figure out how to load without this.
self.config = config
# If no config -- try getting all the supported layers
if self.config is None:
# Add all models to the config
self.make_config_from_model(self.model)
for module_config in self.config:
if type(module_config) is tuple:
first_layer, next_layer = module_config
assert isinstance(first_layer, nn.Conv2d) and isinstance(next_layer, nn.BatchNorm2d)
assert isinstance(module_config, tuple) # for mypy
module_config = {'module': module_config}
local_args = copy.deepcopy(self.defaults)
local_args.update(module_config)
module_fqn_list = []
tensor_fqn_list = []
tensor_name_list = []
for module in local_args['module']:
module_fqn = module_to_fqn(model, module)
if module_fqn is None:
module_fqn = ''
if module_fqn and module_fqn[0] == '.':
module_fqn = module_fqn[1:]
module_fqn_list.append(module_fqn)
tensor_fqn_list.append(module_fqn + '.weight')
tensor_name_list.append('weight')
local_args['module_fqn'] = module_fqn_list
local_args['tensor_fqn'] = tensor_fqn_list
local_args['tensor_name'] = tensor_name_list
else:
if isinstance(module_config, nn.Module):
module_config = {'module': module_config} # type: ignore[dict-item]
local_args = copy.deepcopy(self.defaults)
local_args.update(module_config)
# now that we're working with a dict, does it have the new format?
if local_args.get('tensor_fqn', None) is not None:
tensor_fqn = local_args.get('tensor_fqn')
assert isinstance(tensor_fqn, str) # for mypy
info_from_tensor_fqn = get_arg_info_from_tensor_fqn(model, tensor_fqn)
for key in info_from_tensor_fqn.keys():
if key in local_args:
# info_from_tensor_fqn will chop leading '.' from tensor_fqn so ignore that
assert key == 'tensor_fqn' or info_from_tensor_fqn[key] == local_args[key], (
"Given both `{}` and `tensor_fqn`, it is expected them to "
"agree!".format(key)
)
local_args.update(info_from_tensor_fqn)
else:
module = local_args['module']
module_fqn = module_to_fqn(model, module)
if module_fqn and module_fqn[0] == '.':
module_fqn = module_fqn[1:]
local_args['module_fqn'] = module_fqn
local_args['tensor_name'] = "weight"
assert isinstance(module_fqn, str) # for mypy
local_args['tensor_fqn'] = module_fqn + ".weight"
self.groups.append(local_args)
self._prepare()
def squash_mask(self, use_path=False, *args, **kwargs):
for config in self.groups:
modules, tensor_names = self._get_modules_and_tensor_names(config, use_path)
for module, tensor_name in zip(modules, tensor_names):
parametrize.remove_parametrizations(module, tensor_name,
leave_parametrized=True)
if getattr(module._parameters, 'mask', None):
del module._parameters['mask']
elif getattr(module._buffers, 'mask', None):
del module._buffers['mask']
delattr(module, 'mask')
def get_module_pruned_outputs(self, module, tensor_name='weight'):
r"""Returns the set of pruned indices of module"""
assert parametrize.is_parametrized(module) # can only get pruned indices of pruned module
return getattr(module.parametrizations, tensor_name)[0].pruned_outputs # assume only one parametrization attached
def step(self, use_path=False):
if not self.enable_mask_update:
return
with torch.no_grad():
for config in self.groups:
modules, tensor_names = self._get_modules_and_tensor_names(config, use_path)
untupled_args: dict = {k: v for k, v in config.items() if k not in ('module', 'tensor_name')}
# only need to update the first module in modules if len(modules) > 1
# since they should share the same set of pruned outputs
untupled_args['module'] = modules[0]
untupled_args['tensor_name'] = tensor_names[0]
self.update_mask(**untupled_args)
@abc.abstractmethod
def update_mask(self, module, tensor_name, **kwargs):
pass
|
pytorch-master
|
torch/ao/sparsity/_experimental/pruner/base_pruner.py
|
import torch
from torch import nn
from typing import Any, List
__all__ = ['PruningParametrization', 'ZeroesParametrization', 'ActivationReconstruction', 'BiasHook']
class PruningParametrization(nn.Module):
def __init__(self, original_outputs):
super().__init__()
self.original_outputs = set(range(original_outputs.item()))
self.pruned_outputs = set()  # Will contain indices of outputs to prune
def forward(self, x):
valid_outputs = self.original_outputs - self.pruned_outputs
return x[list(valid_outputs)]
class ZeroesParametrization(nn.Module):
r"""Zero out pruned channels instead of removing.
E.g. used for Batch Norm pruning, which should match previous Conv2d layer."""
def __init__(self, original_outputs):
super().__init__()
self.original_outputs = set(range(original_outputs.item()))
self.pruned_outputs = set()  # Will contain indices of outputs to prune
def forward(self, x):
x.data[list(self.pruned_outputs)] = 0
return x
class ActivationReconstruction:
def __init__(self, parametrization):
self.param = parametrization
def __call__(self, module, input, output):
max_outputs = self.param.original_outputs
pruned_outputs = self.param.pruned_outputs
valid_columns = list(max_outputs - pruned_outputs)
# get size of reconstructed output
sizes = list(output.shape)
sizes[1] = len(max_outputs)
# get valid indices of reconstructed output
indices: List[Any] = []
for size in output.shape:
indices.append(slice(0, size, 1))
indices[1] = valid_columns
reconstructed_tensor = torch.zeros(sizes,
dtype=output.dtype,
device=output.device,
layout=output.layout)
reconstructed_tensor[indices] = output
return reconstructed_tensor
class BiasHook:
def __init__(self, parametrization, prune_bias):
self.param = parametrization
self.prune_bias = prune_bias
def __call__(self, module, input, output):
pruned_outputs = self.param.pruned_outputs
if getattr(module, '_bias', None) is not None:
bias = module._bias.data
if self.prune_bias:
bias[list(pruned_outputs)] = 0
# reshape bias to broadcast over output dimensions
idx = [1] * len(output.shape)
idx[1] = -1
bias = bias.reshape(idx)
output += bias
return output
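# Illustrative sketch (not part of the original module): attaching PruningParametrization
# to a small linear layer; the layer sizes and the pruned index below are made up.
def _pruning_parametrization_sketch():
    from torch.nn.utils import parametrize
    linear = nn.Linear(4, 3)
    param = PruningParametrization(torch.tensor(linear.weight.shape[0]))
    parametrize.register_parametrization(linear, 'weight', param, unsafe=True)
    param.pruned_outputs.add(1)   # mark output channel 1 as pruned
    return linear.weight.shape    # the parametrized weight now has 2 rows instead of 3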
|
pytorch-master
|
torch/ao/sparsity/_experimental/pruner/parametrization.py
|
from typing import Any, Dict, List, Optional
import torch
from collections import defaultdict
from torch import nn
import copy
from ...sparsifier.utils import fqn_to_module, module_to_fqn
import warnings
__all__ = ['ActivationSparsifier']
class ActivationSparsifier:
r"""
The Activation sparsifier class aims to sparsify/prune activations in a neural
network. The idea is to attach the sparsifier to a layer (or layers) and it
zeroes out the activations based on the mask_fn (or sparsification function)
input by the user.
The mask_fn is applied once all the inputs are aggregated and reduced i.e.
mask = mask_fn(reduce_fn(aggregate_fn(activations)))
Note::
The sparsification mask is computed on the input **before it goes through the attached layer**.
Args:
model (nn.Module):
The model whose layers will be sparsified. The layers that need to be
sparsified should be registered separately using the register_layer() function
aggregate_fn (Optional, Callable):
default aggregate_fn that is used if not specified while registering the layer.
specifies how inputs should be aggregated over time.
The aggregate_fn should usually take 2 torch tensors and return the aggregated tensor.
Example
def add_agg_fn(tensor1, tensor2): return tensor1 + tensor2
reduce_fn (Optional, Callable):
default reduce_fn that is used if not specified while registering the layer.
reduce_fn will be called on the aggregated tensor i.e. the tensor obtained after
calling agg_fn() on all inputs.
Example
def mean_reduce_fn(agg_tensor): return agg_tensor.mean(dim=0)
mask_fn (Optional, Callable):
default mask_fn that is used to create the sparsification mask using the tensor obtained after
calling the reduce_fn(). This is used by default if a custom one is not passed in the
register_layer().
Note that the mask_fn() definition should accept the sparse arguments that are passed in the sparse_config
argument.
features (Optional, list):
default selected features to sparsify.
If this is non-empty, then the mask_fn will be applied for each feature of the input.
For example,
mask = [mask_fn(reduce_fn(aggregate_fn(input[feature]))) for feature in features]
feature_dim (Optional, int):
default dimension of input features. Again, features along this dim will be chosen
for sparsification.
sparse_config (Dict):
Default configuration for the mask_fn. This config will be passed
with the mask_fn()
Example:
>>> # xdoctest: +SKIP
>>> model = SomeModel()
>>> act_sparsifier = ActivationSparsifier(...) # init activation sparsifier
>>> # Initialize aggregate_fn
>>> def agg_fn(x, y):
>>> return x + y
>>>
>>> # Initialize reduce_fn
>>> def reduce_fn(x):
>>> return torch.mean(x, dim=0)
>>>
>>> # Initialize mask_fn
>>> def mask_fn(data):
>>> return torch.eye(data.shape).to(data.device)
>>>
>>>
>>> act_sparsifier.register_layer(model.some_layer, aggregate_fn=agg_fn, reduce_fn=reduce_fn, mask_fn=mask_fn)
>>>
>>> # start training process
>>> for _ in [...]:
>>> # epoch starts
>>> # model.forward(), compute_loss() and model.backwards()
>>> # epoch ends
>>> act_sparsifier.step()
>>> # end training process
>>> sparsifier.squash_mask()
"""
def __init__(self, model: nn.Module, aggregate_fn=None, reduce_fn=None, mask_fn=None,
features=None, feature_dim=None, **sparse_config):
self.model = model
self.defaults: Dict[str, Any] = defaultdict()
self.defaults['sparse_config'] = sparse_config
# functions
self.defaults['aggregate_fn'] = aggregate_fn
self.defaults['reduce_fn'] = reduce_fn
self.defaults['mask_fn'] = mask_fn
# default feature and feature_dim
self.defaults['features'] = features
self.defaults['feature_dim'] = feature_dim
self.data_groups: Dict[str, Dict] = defaultdict(dict) # contains all relevant info w.r.t each registered layer
self.state: Dict[str, Any] = defaultdict(dict) # layer name -> mask
@staticmethod
def _safe_rail_checks(args):
"""Makes sure that some of the functions and attributes are not passed incorrectly
"""
# if features are not None, then feature_dim must not be None
features, feature_dim = args['features'], args['feature_dim']
if features is not None:
assert feature_dim is not None, "need feature dim to select features"
# all the *_fns should be callable
fn_keys = ['aggregate_fn', 'reduce_fn', 'mask_fn']
for key in fn_keys:
fn = args[key]
assert callable(fn), 'function should be callable'
def _aggregate_hook(self, name):
"""Returns hook that computes aggregate of activations passing through.
"""
# gather some data
feature_dim = self.data_groups[name]['feature_dim']
features = self.data_groups[name]['features']
agg_fn = self.data_groups[name]['aggregate_fn']
def hook(module, input) -> None:
input_data = input[0]
data = self.data_groups[name].get('data') # aggregated data
if features is None:
# no features associated, data should not be a list
if data is None:
data = torch.zeros_like(input_data)
self.state[name]['mask'] = torch.ones_like(input_data)
out_data = agg_fn(data, input_data)
else:
# data should be a list [aggregated over each feature only]
if data is None:
out_data = [0 for _ in range(0, len(features))]  # create one in case of the first forward
self.state[name]['mask'] = [0 for _ in range(0, len(features))]
else:
out_data = data # a list
# compute aggregate over each feature
for feature_idx in range(len(features)):
# each feature is either a list or scalar, convert it to torch tensor
feature_tensor = torch.Tensor([features[feature_idx]]).long().to(input_data.device)
data_feature = torch.index_select(input_data, feature_dim, feature_tensor)
if data is None:
curr_data = torch.zeros_like(data_feature)
self.state[name]['mask'][feature_idx] = torch.ones_like(data_feature)
else:
curr_data = data[feature_idx]
out_data[feature_idx] = agg_fn(curr_data, data_feature)
self.data_groups[name]['data'] = out_data
return hook
def register_layer(self, layer: nn.Module, aggregate_fn=None, reduce_fn=None,
mask_fn=None, features=None, feature_dim=None, **sparse_config):
r"""
Registers a layer for sparsification. The layer should be part of self.model.
Specifically, registers a pre-forward hook to the layer. The hook will apply the aggregate_fn
and store the aggregated activations that are input over each step.
Note::
- There is no need to pass in the name of the layer as it is automatically computed as per
the fqn convention.
- All the functions (fn) passed as argument will be called at a dim, feature level.
"""
name = module_to_fqn(self.model, layer)
assert name is not None, "layer not found in the model" # satisfy mypy
if name in self.data_groups: # unregister layer if already present
warnings.warn("layer already attached to the sparsifier, deregistering the layer and registering with new config")
self.unregister_layer(name=name)
local_args = copy.deepcopy(self.defaults)
update_dict = {
'aggregate_fn': aggregate_fn,
'reduce_fn': reduce_fn,
'mask_fn': mask_fn,
'features': features,
'feature_dim': feature_dim,
'layer': layer
}
local_args.update((arg, val) for arg, val in update_dict.items() if val is not None)
local_args['sparse_config'].update(sparse_config)
self._safe_rail_checks(local_args)
self.data_groups[name] = local_args
agg_hook = layer.register_forward_pre_hook(self._aggregate_hook(name=name))
self.state[name]['mask'] = None # mask will be created when model forward is called.
# attach agg hook
self.data_groups[name]['hook'] = agg_hook
# for serialization purposes, we know whether aggregate_hook is attached
# or sparsify_hook()
self.data_groups[name]['hook_state'] = "aggregate" # aggregate hook is attached
def get_mask(self, name: Optional[str] = None, layer: Optional[nn.Module] = None):
"""
Returns the mask associated with the layer.
The mask is
- a torch tensor if features for that layer is None.
- a list of torch tensors, one for each feature, otherwise
Note::
The shape of the mask is unknown until model.forward() is applied.
Hence, if get_mask() is called before model.forward(), an
error will be raised.
"""
assert name is not None or layer is not None, "Need at least name or layer obj to retrieve mask"
if name is None:
assert layer is not None
name = module_to_fqn(self.model, layer)
assert name is not None, "layer not found in the specified model"
if name not in self.state:
raise ValueError("Error: layer with the given name not found")
mask = self.state[name].get('mask', None)
if mask is None:
raise ValueError("Error: shape unknown, call layer() routine at least once to infer mask")
return mask
def unregister_layer(self, name):
"""Detaches the sparsifier from the layer
"""
# detach any hooks attached
self.data_groups[name]['hook'].remove()
# pop from the state dict
self.state.pop(name)
# pop from the data groups
self.data_groups.pop(name)
def step(self):
"""Internally calls the update_mask() function for each layer
"""
with torch.no_grad():
for name, configs in self.data_groups.items():
data = configs['data']
self.update_mask(name, data, configs)
self.data_groups[name].pop('data') # reset the accumulated data
def update_mask(self, name, data, configs):
"""
Called for each registered layer and does the following-
1. apply reduce_fn on the aggregated activations
2. use mask_fn to compute the sparsification mask
Note:
the reduce_fn and mask_fn are called for each feature, dim over the data
"""
mask = self.get_mask(name)
sparse_config = configs['sparse_config']
features = configs['features']
reduce_fn = configs['reduce_fn']
mask_fn = configs['mask_fn']
if features is None:
data = reduce_fn(data)
mask.data = mask_fn(data, **sparse_config)
else:
for feature_idx in range(len(features)):
data_feature = reduce_fn(data[feature_idx])
mask[feature_idx].data = mask_fn(data_feature, **sparse_config)
def _sparsify_hook(self, name):
"""Returns hook that applies sparsification mask to input entering the attached layer
"""
mask = self.get_mask(name)
features = self.data_groups[name]['features']
feature_dim = self.data_groups[name]['feature_dim']
def hook(module, input):
input_data = input[0]
if features is None:
# apply to all the features
return input_data * mask
else:
# apply per feature, feature_dim
for feature_idx in range(0, len(features)):
feature = torch.Tensor([features[feature_idx]]).long().to(input_data.device)
sparsified = torch.index_select(input_data, feature_dim, feature) * mask[feature_idx]
input_data.index_copy_(feature_dim, feature, sparsified)
return input_data
return hook
def squash_mask(self, attach_sparsify_hook=True, **kwargs):
"""
Unregisters the aggregate hook that was applied earlier and registers sparsification hooks if
attach_sparsify_hook = True.
"""
for name, configs in self.data_groups.items():
# unhook agg hook
configs['hook'].remove()
configs.pop('hook')
self.data_groups[name]['hook_state'] = "None"
if attach_sparsify_hook:
configs['hook'] = configs['layer'].register_forward_pre_hook(self._sparsify_hook(name))
configs['hook_state'] = "sparsify" # signals that sparsify hook is now attached
def _get_serializable_data_groups(self):
"""Exclude hook and layer from the config keys before serializing
TODO: Might have to treat functions (reduce_fn, mask_fn etc) in a different manner while serializing.
For the time being, functions are treated the same way as other attributes
"""
data_groups: Dict[str, Any] = defaultdict()
for name, config in self.data_groups.items():
new_config = {key: value for key, value in config.items() if key not in ['hook', 'layer']}
data_groups[name] = new_config
return data_groups
def _convert_mask(self, states_dict, sparse_coo=True):
r"""Converts the mask to sparse coo or dense depending on the `sparse_coo` argument.
If `sparse_coo=True`, then the mask is stored as sparse coo else dense tensor
"""
states = copy.deepcopy(states_dict)
for _, state in states.items():
if state['mask'] is not None:
if isinstance(state['mask'], List):
for idx in range(len(state['mask'])):
if sparse_coo:
state['mask'][idx] = state['mask'][idx].to_sparse_coo()
else:
state['mask'][idx] = state['mask'][idx].to_dense()
else:
if sparse_coo:
state['mask'] = state['mask'].to_sparse_coo()
else:
state['mask'] = state['mask'].to_dense()
return states
def state_dict(self) -> Dict[str, Any]:
r"""Returns the state of the sparsifier as a :class:`dict`.
It contains:
* state - contains name -> mask mapping.
* data_groups - a dictionary containing all config information for each
layer
* defaults - the default config while creating the constructor
"""
data_groups = self._get_serializable_data_groups()
state = self._convert_mask(self.state)
return {
'state': state,
'data_groups': data_groups,
'defaults': self.defaults
}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
r"""The load_state_dict() restores the state of the sparsifier based on the state_dict
Args:
* state_dict - the dictionary to which the current sparsifier's state should be restored
"""
state = state_dict['state']
data_groups, defaults = state_dict['data_groups'], state_dict['defaults']
self.__set_state__({'state': state, 'data_groups': data_groups, 'defaults': defaults})
def __get_state__(self) -> Dict[str, Any]:
data_groups = self._get_serializable_data_groups()
state = self._convert_mask(self.state)
return {
'defaults': self.defaults,
'state': state,
'data_groups': data_groups,
}
def __set_state__(self, state: Dict[str, Any]) -> None:
state['state'] = self._convert_mask(state['state'], sparse_coo=False) # convert mask to dense tensor
self.__dict__.update(state)
# need to attach layer and hook info into the data_groups
for name, config in self.data_groups.items():
# fetch layer
layer = fqn_to_module(self.model, name)
assert layer is not None # satisfy mypy
# if agg_mode is True, then layer in aggregate mode
if "hook_state" in config and config['hook_state'] == "aggregate":
hook = layer.register_forward_pre_hook(self._aggregate_hook(name))
elif "hook_state" in config and config["hook_state"] == "sparsify":
hook = layer.register_forward_pre_hook(self._sparsify_hook(name))
config['layer'] = layer
config['hook'] = hook
def __repr__(self):
format_string = self.__class__.__name__ + ' ('
for name, config in self.data_groups.items():
format_string += '\n'
format_string += '\tData Group\n'
format_string += f'\t name: {name}\n'
for key in sorted(config.keys()):
if key in ['data', 'hook', 'reduce_fn', 'mask_fn', 'aggregate_fn']:
continue
format_string += f'\t {key}: {config[key]}\n'
format_string += ')'
return format_string
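# Illustrative sketch (not part of the original class): a mask_fn of the kind the
# docstring above describes; `threshold` is a made-up sparse_config key.
def _threshold_mask_fn(data, threshold=0.5, **sparse_config):
    # keep activations whose magnitude exceeds the threshold, zero out the rest
    return (data.abs() > threshold).to(data.dtype)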
|
pytorch-master
|
torch/ao/sparsity/_experimental/activation_sparsifier/activation_sparsifier.py
|
pytorch-master
|
torch/ao/sparsity/_experimental/activation_sparsifier/__init__.py
|
|
from .base_data_scheduler import BaseDataScheduler
__all__ = [
"BaseDataScheduler",
]
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_scheduler/__init__.py
|
from functools import wraps
import weakref
import abc
import warnings
from ..data_sparsifier import BaseDataSparsifier
__all__ = ['BaseDataScheduler']
class BaseDataScheduler(object):
r"""
The BaseDataScheduler is the abstract scheduler class specifically for the
BaseDataSparsifier class. This class controls a specific hyperparameter of
the sparsifier class and varies it across the training process (or across time).
Args:
data_sparsifier (instance of BaseDataSparsifier)
The data sparsifier instance (a subclass of BaseDataSparsifier) in which update_mask is implemented
schedule_param (str)
A specific hyperparameter of the passed sparsifier that needs to be scheduled/varied
last_epoch (int, default=-1)
This is passed when training needs to be resumed from a particular
point.
verbose (bool, default=False)
Verbosity of the BaseDataScheduler
The *get_schedule_param()* function needs to be implemented by the user.
"""
def __init__(self, data_sparsifier, schedule_param: str, last_epoch=-1, verbose=False):
# Attach sparsifier
if not isinstance(data_sparsifier, BaseDataSparsifier):
raise TypeError('{} is not an instance of torch.ao.sparsity.BaseDataSparsifier'.format(
type(data_sparsifier).__name__))
self.data_sparsifier = data_sparsifier
self.schedule_param = schedule_param
# Initialize epoch and base hyper-params
self.base_param = {
name: config.get(schedule_param, None)
for name, config in self.data_sparsifier.data_groups.items()
}
self.last_epoch = last_epoch
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `scheduler.step()` is called after
# `sparsifier.step()`
def with_counter(method):
if getattr(method, '_with_counter', False):
# `sparsifier.step()` has already been replaced, return.
return method
# Keep a weak reference to the sparsifier instance to prevent
# cyclic references.
instance_ref = weakref.ref(method.__self__)
# Get the unbound method for the same purpose.
func = method.__func__
cls = instance_ref().__class__
del method
@wraps(func)
def wrapper(*args, **kwargs):
instance = instance_ref()
instance._step_count += 1 # type: ignore[union-attr]
wrapped = func.__get__(instance, cls)
return wrapped(*args, **kwargs)
# Note that the returned function here is no longer a bound method,
# so attributes like `__func__` and `__self__` no longer exist.
wrapper._with_counter = True # type: ignore[attr-defined]
return wrapper
self.data_sparsifier.step = with_counter(self.data_sparsifier.step) # type: ignore[assignment]
self.data_sparsifier._step_count = 0 # type: ignore[attr-defined]
self._step_count: int = 0
self.verbose = verbose
# Housekeeping
self._get_sp_called_within_step: bool = False # sp -> schedule parameter
self.step()
@abc.abstractmethod
def get_schedule_param(self):
r"""
Abstract method that needs to be implemented by the child class.
The expected return value is a dictionary mapping name to schedule_param value.
The returned values will be updated in sparsifier when the scheduler step() function
is called.
Example:
>>> def get_schedule_param(self):
... new_param = {}
... for name in self.sparsifier.data_groups.keys():
... new_param[name] = self.sparsifier.data_groups[name][self.schedule_param] * 0.5
... return new_param
When the step() function is called, the value in self.sparsifier.data_groups[name][self.schedule_param]
would be halved
"""
raise NotImplementedError
def __repr__(self):
format_string = self.__class__.__name__ + ' ('
format_string += '\n'
format_string += 'Data Sparsifier {0}\n'.format(self.data_sparsifier)
format_string += ' {0}: {1}\n'.format(self.schedule_param, self.base_param)
format_string += ')'
return format_string
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the sparsifier.
Note:
The scheduler class does not track the state of the data_sparsifier.
Make sure to store the state of the sparsifier before storing the
state of the scheduler
"""
return {key: value for key, value in self.__dict__.items() if key != 'data_sparsifier'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Note:
Remember to restore the state of the data_sparsifier before the scheduler.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_last_param(self):
return self._last_param
def step(self):
# Raise warning if trying to call scheduler step before the sparsifier.
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.data_sparsifier.step, "_with_counter"):
warnings.warn("Seems like `data_sparsifier.step()` has been overridden after sparsity scheduler "
"initialization. Please, make sure to call `data_sparsifier.step()` before "
"`scheduler.step()`.", UserWarning)
# Just check if there were two first scheduler.step() calls before sparsifier.step()
elif self.data_sparsifier._step_count < 1: # type: ignore[attr-defined]
warnings.warn("Detected call of `scheduler.step()` before `data_sparsifier.step()`. "
"You have to make sure you run the data_sparsifier.step() BEFORE any "
"calls to the scheduer.step().", UserWarning)
self._step_count += 1
class _enable_get_sp_call:
def __init__(self, o):
self.o = o
def __enter__(self):
self.o._get_sp_called_within_step = True
return self
def __exit__(self, type, value, traceback):
self.o._get_sp_called_within_step = False
with _enable_get_sp_call(self):
self.last_epoch += 1
updated_scheduler_params = self.get_schedule_param()
for name, param in updated_scheduler_params.items():
self.data_sparsifier.data_groups[name][self.schedule_param] = param
if self.verbose:
print(f"Adjusting {self.schedule_param} for group {name} to {param}")
self._last_param = {
name: config.get(self.schedule_param, None)
for name, config in self.data_sparsifier.data_groups.items()
}
self.data_sparsifier.enable_mask_update = True
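# Illustrative sketch (not part of the original module): a minimal concrete scheduler
# that halves the scheduled hyperparameter every epoch; the halving rule is made up.
class _HalvingDataScheduler(BaseDataScheduler):
    def get_schedule_param(self):
        return {
            name: config[self.schedule_param] * 0.5
            for name, config in self.data_sparsifier.data_groups.items()
        }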
|
pytorch-master
|
torch/ao/sparsity/_experimental/data_scheduler/base_data_scheduler.py
|
from torch.ao.sparsity import BaseSparsifier
from functools import wraps
import warnings
import weakref
__all__ = ["BaseScheduler"]
class BaseScheduler(object):
def __init__(self, sparsifier, last_epoch=-1, verbose=False):
# Attach sparsifier
if not isinstance(sparsifier, BaseSparsifier):
raise TypeError('{} is not an instance of torch.ao.sparsity.BaseSparsifier'.format(
type(sparsifier).__name__))
self.sparsifier = sparsifier
# Initialize epoch and base sparsity levels
self.base_sl = [group['sparsity_level'] for group in sparsifier.groups]
self.last_epoch = last_epoch
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `scheduler.step()` is called after
# `sparsifier.step()`
def with_counter(method):
if getattr(method, '_with_counter', False):
# `sparsifier.step()` has already been replaced, return.
return method
# Keep a weak reference to the sparsifier instance to prevent
# cyclic references.
instance_ref = weakref.ref(method.__self__)
# Get the unbound method for the same purpose.
func = method.__func__
cls = instance_ref().__class__
del method
@wraps(func)
def wrapper(*args, **kwargs):
instance = instance_ref()
instance._step_count += 1 # type: ignore[union-attr]
wrapped = func.__get__(instance, cls)
return wrapped(*args, **kwargs)
# Note that the returned function here is no longer a bound method,
# so attributes like `__func__` and `__self__` no longer exist.
wrapper._with_counter = True # type: ignore[attr-defined]
return wrapper
self.sparsifier.step = with_counter(self.sparsifier.step) # type: ignore[assignment]
self.sparsifier._step_count = 0 # type: ignore[attr-defined]
self._step_count: int = 0
self.verbose = verbose
# Housekeeping
self._get_sl_called_within_step: bool = False
self.step()
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the sparsifier.
"""
return {key: value for key, value in self.__dict__.items() if key != 'sparsifier'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_last_sl(self):
""" Return last computed sparsity level by current scheduler.
"""
return self._last_sl
def get_sl(self):
# Compute sparsity level using chainable form of the scheduler
# Note: This method is not intended to be called directly, and is only
# used by the ".step" method. Use .get_last_sl() instead.
if not self._get_sl_called_within_step:
warnings.warn(
"To get the last sparsity level computed by the scheduler, "
"please use `get_last_sl()`.")
raise NotImplementedError
def print_sl(self, is_verbose, group, sl, epoch=None):
"""Display the current sparsity level.
"""
if is_verbose:
if epoch is None:
print('Adjusting sparsity level'
' of group {} to {:.4e}.'.format(group, sl))
else:
print('Epoch {:5d}: adjusting sparsity level'
' of group {} to {:.4e}.'.format(epoch, group, sl))
def __repr__(self):
format_string = self.__class__.__name__ + ' ('
format_string += '\n'
format_string += 'Sparsifier {0}\n'.format(self.sparsifier)
format_string += ' {0}: {1}\n'.format('base_sl', self.base_sl)
format_string += ')'
return format_string
def step(self, epoch=None):
# Raise warning if trying to call scheduler step before the sparsifier.
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.sparsifier.step, "_with_counter"):
warnings.warn("Seems like `sparsifier.step()` has been overridden after sparsity scheduler "
"initialization. Please, make sure to call `sparsifier.step()` before "
"`scheduler.step()`.", UserWarning)
# Just check if there were two first scheduler.step() calls before sparsifier.step()
elif self.sparsifier._step_count < 1: # type: ignore[attr-defined]
warnings.warn("Detected call of `scheduler.step()` before `sparsifier.step()`. "
"You have to make sure you run the sparsifier.step() BEFORE any "
"calls to the scheduer.step().", UserWarning)
self._step_count += 1
class _enable_get_sl_call:
def __init__(self, o):
self.o = o
def __enter__(self):
self.o._get_sl_called_within_step = True
return self
def __exit__(self, type, value, traceback):
self.o._get_sl_called_within_step = False
with _enable_get_sl_call(self):
self.last_epoch += 1
values = self.get_sl()
for i, data in enumerate(zip(self.sparsifier.groups, values)):
param_group, sl = data
param_group['sparsity_level'] = sl
self.print_sl(self.verbose, i, sl, epoch)
self._last_sl = [group['sparsity_level'] for group in self.sparsifier.groups]
self.sparsifier.enable_mask_update = True
|
pytorch-master
|
torch/ao/sparsity/scheduler/base_scheduler.py
|
pytorch-master
|
torch/ao/sparsity/scheduler/__init__.py
|
|
import warnings
from .base_scheduler import BaseScheduler
__all__ = ["LambdaSL"]
class LambdaSL(BaseScheduler):
"""Sets the sparsity level of each parameter group to the final sl
times a given function. When last_epoch=-1, sets initial sl as zero.
Args:
sparsifier (BaseSparsifier): Wrapped sparsifier.
sl_lambda (function or list): A function which computes a multiplicative
factor given an integer parameter epoch, or a list of such
functions, one for each group in sparsifier.groups.
last_epoch (int): The index of last epoch. Default: -1.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
Example:
>>> # Assuming sparsifier has two groups.
>>> lambda1 = lambda epoch: epoch // 30
>>> lambda2 = lambda epoch: 0.95 ** epoch
>>> # xdoctest: +SKIP
>>> scheduler = LambdaSL(sparsifier, sl_lambda=[lambda1, lambda2])
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
def __init__(self, sparsifier, sl_lambda, last_epoch=-1, verbose=False):
self.sparsifier = sparsifier
if not isinstance(sl_lambda, list) and not isinstance(sl_lambda, tuple):
self.sl_lambdas = [sl_lambda] * len(sparsifier.groups)
else:
if len(sl_lambda) != len(sparsifier.groups):
raise ValueError("Expected {} lr_lambdas, but got {}".format(
len(sparsifier.groups), len(sl_lambda)))
self.sl_lambdas = list(sl_lambda)
super(LambdaSL, self).__init__(sparsifier, last_epoch, verbose)
def get_sl(self):
if not self._get_sl_called_within_step:
warnings.warn(
"To get the last sparsity level computed by the scheduler, "
"please use `get_last_sl()`.")
return [base_sl * lmbda(self.last_epoch)
for lmbda, base_sl in zip(self.sl_lambdas, self.base_sl)]
|
pytorch-master
|
torch/ao/sparsity/scheduler/lambda_scheduler.py
|
from torch.ao.nn import sparse
|
pytorch-master
|
torch/ao/nn/__init__.py
|
from . import quantized
|
pytorch-master
|
torch/ao/nn/sparse/__init__.py
|
from typing import Optional
import torch
from torch.nn.quantized.modules.utils import _quantize_weight, hide_packed_params_repr
__all__ = ['LinearPackedParams', 'Linear']
# TODO (zaf): Inherit from `quantized.LinearPackedParams` (T83294430)
class LinearPackedParams(torch.nn.Module):
_version = 1
def __init__(self, row_block_size=1, col_block_size=4, dtype=torch.qint8):
super().__init__()
if dtype != torch.qint8:
raise NotImplementedError("Linear prepacking only supports QINT8")
self.dtype = dtype
wq = torch._empty_affine_quantized([1, 1], scale=1.0, zero_point=0, dtype=torch.qint8)
self.set_weight_bias(wq, None, row_block_size, col_block_size)
def _get_name(self):
return "SparseQuantizedLinearPackedParams"
@torch.jit.export
def set_weight_bias(self, weight: torch.Tensor, bias: Optional[torch.Tensor],
row_block_size: Optional[int], col_block_size: Optional[int]) -> None:
assert row_block_size is not None and col_block_size is not None
self._packed_params = torch.ops.sparse.qlinear_prepack(weight, bias, row_block_size, col_block_size)
@torch.jit.export
def _weight_bias(self):
(weight, bias, block_sizes) = torch.ops.sparse.qlinear_unpack(self._packed_params)
return (weight, bias, block_sizes[0], block_sizes[1])
def forward(self, x):
return x
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'dtype'] = self.dtype
destination[prefix + '_packed_params'] = self._weight_bias()
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get('version', None)
assert version <= self._version
self.dtype = state_dict.pop(prefix + 'dtype')
weight, bias, row_block_size, col_block_size = state_dict.pop(prefix + '_packed_params')
self.set_weight_bias(weight, bias, row_block_size, col_block_size)
super()._load_from_state_dict(state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
@torch.jit.export
def __getstate__(self):
return self._packed_params, self.training, self.dtype
@torch.jit.export
def __setstate__(self, state):
(self._packed_params, self.training, self.dtype) = state
def __repr__(self):
return self._weight_bias().__repr__()
# TODO (zaf): Inherit from `quantized.Linear` (T83294430)
class Linear(torch.nn.Module):
r"""
A quantized sparse linear module with quantized tensor as inputs and outputs.
"""
_version = 1
_FLOAT_MODULE = torch.nn.Linear
def __init__(self, in_features, out_features, row_block_size, col_block_size, bias=True, dtype=torch.qint8):
super().__init__()
if dtype != torch.qint8:
raise NotImplementedError("Only QINT8 is supported for Sparse Quantized Linear")
self.in_features = in_features
self.out_features = out_features
if bias:
bias = torch.zeros(self.out_features, dtype=torch.float)
else:
bias = None
qweight = torch._empty_affine_quantized([out_features, in_features],
scale=1, zero_point=0, dtype=torch.qint8)
self._packed_params = LinearPackedParams(row_block_size=row_block_size,
col_block_size=col_block_size,
dtype=dtype)
self._packed_params.set_weight_bias(qweight, bias, row_block_size, col_block_size)
self.scale = 1.0
self.zero_point = 0
@classmethod
def _get_name(cls):
return 'SparseQuantizedLinear'
def extra_repr(self):
return 'in_features={}, out_features={}, scale={}, zero_point={}, qscheme={}'.format(
self.in_features, self.out_features, self.scale, self.zero_point, self.weight().qscheme()
)
def __repr__(self):
return hide_packed_params_repr(self, LinearPackedParams)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.ops.sparse.qlinear(x, self._packed_params._packed_params, self.scale, self.zero_point)
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'scale'] = torch.tensor(self.scale)
destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
self.scale = float(state_dict[prefix + 'scale'])
state_dict.pop(prefix + 'scale')
self.zero_point = int(state_dict[prefix + 'zero_point'])
state_dict.pop(prefix + 'zero_point')
        # `op_type` is not written by `_save_to_state_dict` above, so pop it
        # only if present (older serialized models may still contain it).
        state_dict.pop(prefix + 'op_type', None)
        version = local_metadata.get('version', None)
        assert version <= self._version
super()._load_from_state_dict(
state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
def _weight_bias(self):
return self._packed_params._weight_bias()
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor],
row_block_size: Optional[int], col_block_size: Optional[int]) -> None:
assert row_block_size is not None and col_block_size is not None
self._packed_params.set_weight_bias(w, b, row_block_size, col_block_size)
@classmethod
def from_float(cls, mod):
r"""Create a quantized sparse module from a float module.
We only care about the convert at this stage, no need for observers just yet.
TODO(zaf): Need to add the sparse params to the qconfig
"""
assert type(mod) == cls._FLOAT_MODULE, cls._get_name() + \
'.from_float only works for ' + cls._FLOAT_MODULE.__name__
assert hasattr(mod, 'sparse_params'), \
('Expecting the Linear to have `sparse_params`. Make sure you have provided arguments '
'in the `sparsifier.squash_mask(params_to_save=("sparse_block_shape",))` method.')
sparse_block_shape = mod.sparse_params.get('sparse_block_shape', None) # type: ignore[operator, union-attr]
assert isinstance(sparse_block_shape, (tuple, list))
assert len(sparse_block_shape) == 2
# TODO: Need to add options to qconfig to avoid the calibration.
# TODO: Add calibration for the sparsity
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
activation_post_process = mod.activation_post_process
weight_post_process = mod.qconfig.weight() # type: ignore[operator, union-attr]
# Assumption is that the weight is already sparsified by the
# `sparsifier.convert`
weight = mod.weight
weight_post_process(weight)
dtype = weight_post_process.dtype
act_scale, act_zp = activation_post_process.calculate_qparams() # type: ignore[operator, union-attr]
assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
w_sc, w_zp = weight_post_process.calculate_qparams()
if isinstance(w_zp, torch.Tensor):
assert not torch.any(w_zp.bool()), "All weight zero points must map to 0"
else:
assert w_zp == 0, 'Weight zero point must map to 0'
qweight = _quantize_weight(weight.float(), weight_post_process)
row_block_size = mod.sparse_params['sparse_block_shape'][0] # type: ignore[index]
col_block_size = mod.sparse_params['sparse_block_shape'][1] # type: ignore[index]
qlinear = cls(mod.in_features,
mod.out_features,
row_block_size,
col_block_size,
dtype=dtype)
qlinear.set_weight_bias(qweight, mod.bias,
row_block_size, col_block_size) # type: ignore[arg-type]
qlinear.scale = float(act_scale)
qlinear.zero_point = int(act_zp)
return qlinear
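# --- Illustrative usage sketch (not part of the original module) ---
# Direct construction of the static sparse quantized Linear and a single
# forward pass on a quantized input. This assumes the block-sparse QNNPACK
# kernels behind `torch.ops.sparse.qlinear*` are available in your build; the
# scale/zero_point values below are arbitrary.
if __name__ == "__main__":
    torch.backends.quantized.engine = 'qnnpack'  # assumption: qnnpack is available
    lin = Linear(in_features=8, out_features=4, row_block_size=1, col_block_size=4)
    lin.scale, lin.zero_point = 0.1, 0
    x = torch.quantize_per_tensor(
        torch.randn(2, 8), scale=0.1, zero_point=0, dtype=torch.quint8)
    y = lin(x)  # quantized output using lin.scale / lin.zero_point
    print(y.dequantize())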
|
pytorch-master
|
torch/ao/nn/sparse/quantized/linear.py
|
from torch.ao.nn.sparse.quantized import dynamic
from .linear import Linear
from .linear import LinearPackedParams
__all__ = [
"dynamic",
"Linear",
"LinearPackedParams",
]
|
pytorch-master
|
torch/ao/nn/sparse/quantized/__init__.py
|
import threading
def is_valid_linear_block_sparse_pattern(row_block_size, col_block_size):
return (row_block_size == 1 and col_block_size == 4) or \
(row_block_size == 8 and col_block_size == 1)
# This is a stop-gap measure, as the current flow does not allow a
# module-specific block sparse pattern.
# In fact, there is no way to convey a sparsity pattern via the module config
# of the quantization flow, so the global context is used to convey it.
# Once the flow supports it, this should be removed.
class LinearBlockSparsePattern:
rlock = threading.RLock()
row_block_size = 1
col_block_size = 4
prev_row_block_size = 1
prev_col_block_size = 4
def __init__(self, row_block_size=1, col_block_size=4):
assert(is_valid_linear_block_sparse_pattern(row_block_size, col_block_size))
LinearBlockSparsePattern.rlock.acquire()
LinearBlockSparsePattern.prev_row_block_size = LinearBlockSparsePattern.row_block_size
LinearBlockSparsePattern.prev_col_block_size = LinearBlockSparsePattern.col_block_size
LinearBlockSparsePattern.row_block_size = row_block_size
LinearBlockSparsePattern.col_block_size = col_block_size
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, backtrace):
LinearBlockSparsePattern.row_block_size = LinearBlockSparsePattern.prev_row_block_size
LinearBlockSparsePattern.col_block_size = LinearBlockSparsePattern.prev_col_block_size
LinearBlockSparsePattern.rlock.release()
@staticmethod
def block_size():
return LinearBlockSparsePattern.row_block_size, LinearBlockSparsePattern.col_block_size
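# --- Illustrative usage sketch (not part of the original module) ---
# LinearBlockSparsePattern is meant to be used as a context manager so that
# conversion code (e.g. the dynamic sparse quantized Linear.from_float) can
# read the desired block size via LinearBlockSparsePattern.block_size().
if __name__ == "__main__":
    with LinearBlockSparsePattern(row_block_size=8, col_block_size=1):
        assert LinearBlockSparsePattern.block_size() == (8, 1)
    # on exit the previous (default) pattern is restored
    assert LinearBlockSparsePattern.block_size() == (1, 4)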
|
pytorch-master
|
torch/ao/nn/sparse/quantized/utils.py
|
from typing import Optional
from torch.ao.nn.sparse.quantized import linear
from torch.ao.nn.sparse.quantized.utils import LinearBlockSparsePattern
import torch
import torch.nn.intrinsic as nni
from torch.nn.quantized.modules.utils import _quantize_weight, hide_packed_params_repr
__all__ = ['Linear']
class Linear(torch.nn.Module):
r"""
    A dynamically quantized sparse linear module with float tensors as inputs and outputs.
"""
_version = 1
_op_type = "sparse_dynamic"
_FLOAT_MODULE = torch.nn.Linear
def __init__(self, in_features, out_features, row_block_size, col_block_size, bias=True, dtype=torch.qint8):
super().__init__()
if dtype != torch.qint8:
raise NotImplementedError("Only QINT8 is supported for Sparse Quantized Linear Dynamic")
self.in_features = in_features
self.out_features = out_features
if bias:
bias = torch.zeros(self.out_features, dtype=torch.float)
else:
bias = None
qweight = torch._empty_affine_quantized([out_features, in_features],
scale=1, zero_point=0, dtype=torch.qint8)
self._packed_params = linear.LinearPackedParams(row_block_size=row_block_size,
col_block_size=col_block_size,
dtype=dtype)
self._packed_params.set_weight_bias(qweight, bias, row_block_size, col_block_size)
def _get_name(self):
return 'SparseQuantizedDynamicLinear'
def extra_repr(self):
return 'in_features={}, out_features={}, qscheme={}'.format(
self.in_features, self.out_features, self.weight().qscheme()
)
def __repr__(self):
return hide_packed_params_repr(self, linear.LinearPackedParams)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.ops.sparse.qlinear_dynamic(x, self._packed_params._packed_params)
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'op_type'] = self._op_type
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
        op_type = state_dict[prefix + 'op_type']
        assert op_type == self._op_type, \
            "Cannot load from op_type [{}], expecting [{}]".format(op_type, self._op_type)
state_dict.pop(prefix + 'op_type')
version = local_metadata.get('version', None)
assert version <= self._version
        # Note: this block appears to exist to load older serialized models that
        # stored `weight`/`bias` directly instead of `_packed_params`; it is
        # unclear whether it is still reachable with the current save format.
weight = state_dict.pop(prefix + 'weight')
bias = state_dict.pop(prefix + 'bias')
state_dict.update({prefix + '_packed_params.weight': weight,
prefix + '_packed_params.bias': bias})
super()._load_from_state_dict(
state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
def _weight_bias(self):
return self._packed_params._weight_bias()
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor],
row_block_size: Optional[int], col_block_size: Optional[int]) -> None:
assert row_block_size is not None and col_block_size is not None
self.out_features = w.shape[0]
self.in_features = w.shape[1]
self._packed_params.set_weight_bias(w, b, row_block_size, col_block_size)
@classmethod
def from_float(cls, mod):
r"""Create a quantized sparse dynamic module from a float module.
We only care about the convert at this stage, no need for observers just yet.
"""
assert type(mod) == cls._FLOAT_MODULE, ' nnq.' + cls.__name__ + '.from_float only works for ' + \
cls._FLOAT_MODULE.__name__
# TODO: Need to add options to qconfig to avoid the calibration.
# TODO: Add calibration for the sparsity
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
if type(mod) == nni.LinearReLU:
mod = mod[0]
if mod.qconfig is not None and mod.qconfig.weight is not None:
weight_observer = mod.qconfig.weight()
else:
# We have the circular import issues if we import the qconfig in the beginning of this file:
# https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
# import until we need it.
from torch.ao.quantization.qconfig import default_dynamic_qconfig
weight_observer = default_dynamic_qconfig.weight()
# It is important to multiply by the mask BEFORE calling the `weight_observer`
# TODO (zaf): Mask might not be part of the qconfig (T83295194)
weight = mod.weight
if getattr(mod.qconfig, 'mask', False):
weight = mod.qconfig.mask * mod.weight
weight_observer(weight)
dtype = weight_observer.dtype
assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
w_sc, w_zp = weight_observer.calculate_qparams()
if isinstance(w_zp, torch.Tensor):
assert not torch.any(w_zp.bool()), "All weight zero points must map to 0"
else:
assert w_zp == 0, 'Weight zero point must map to 0'
qweight = _quantize_weight(weight.float(), weight_observer)
row_block_size, col_block_size = LinearBlockSparsePattern.block_size()
qlinear = cls(mod.in_features,
mod.out_features,
row_block_size,
col_block_size,
dtype=dtype)
qlinear.set_weight_bias(qweight, mod.bias, row_block_size, col_block_size)
return qlinear
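# --- Illustrative usage sketch (not part of the original module) ---
# Converting a float torch.nn.Linear into the dynamic sparse quantized Linear
# via from_float. The block size is conveyed through LinearBlockSparsePattern
# (see torch/ao/nn/sparse/quantized/utils.py). This assumes the sparse QNNPACK
# kernels behind `torch.ops.sparse.qlinear_dynamic` are available in your build.
if __name__ == "__main__":
    from torch.ao.quantization.qconfig import default_dynamic_qconfig
    float_lin = torch.nn.Linear(8, 4)
    float_lin.qconfig = default_dynamic_qconfig
    with LinearBlockSparsePattern(row_block_size=1, col_block_size=4):
        qlin = Linear.from_float(float_lin)
    y = qlin(torch.randn(2, 8))  # float in, float out (dynamic quantization)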
|
pytorch-master
|
torch/ao/nn/sparse/quantized/dynamic/linear.py
|
from .linear import Linear
__all__ = [
"Linear",
]
|
pytorch-master
|
torch/ao/nn/sparse/quantized/dynamic/__init__.py
|
"""
Numeric Suite Core APIs for define-by-run quantization.
Experimental, API may change at any time.
"""
import functools
from typing import Tuple, Any, Optional, List, Dict
import torch
from torch.ao.quantization._dbr.quantization_state import (
AutoQuantizationState,
)
def _turn_on_loggers(name: str, model: torch.nn.Module) -> None:
for _, module in model.named_modules():
if isinstance(module, AutoQuantizationState):
module.logging_model_name = name
module.log_op_outputs = True
def add_loggers(
name_a: str,
model_a: torch.nn.Module,
name_b: str,
model_b: torch.nn.Module,
) -> Tuple[torch.nn.Module, torch.nn.Module]:
"""
Enables intermediate activation logging on model_a and model_b.
"""
_turn_on_loggers(name_a, model_a)
_turn_on_loggers(name_b, model_b)
return model_a, model_b
def _extract_logger_info_one_model(model: torch.nn.Module) -> Tuple[str, Any]:
results: Optional[List[List[Any]]] = None
model_name = None
for _, module in model.named_modules():
if isinstance(module, AutoQuantizationState):
if results is None:
# initialize results to the right length
results = [[] for i in range(len(module.op_outputs))]
assert results is not None
if model_name is None:
# model_name is the same everywhere in this model, take
# the first one
model_name = module.logging_model_name
for forward_idx, outputs in enumerate(module.op_outputs):
results[forward_idx].extend(outputs)
# sort each forward's results by global idx
assert results is not None
assert model_name is not None
for result_idx, result in enumerate(results):
result.sort(key=functools.cmp_to_key( # type: ignore[misc]
lambda a, b: 1 if a[0] > b[0] else -1)) # type: ignore[index]
return model_name, results
def extract_logger_info(
model_a: torch.nn.Module,
model_b: torch.nn.Module,
model_name_to_use_for_layer_names: str,
) -> Any:
"""
Extracts intermediate activations from model_a and model_b.
"""
model_name_a, results_a = _extract_logger_info_one_model(model_a)
model_name_b, results_b = _extract_logger_info_one_model(model_b)
assert len(results_a) == len(results_b), 'results length mismatch'
results: Dict[str, Any] = {}
if len(results_a) == 0:
return results
for op_idx in range(len(results_a[0])):
# currently using global_idx for layer_name
        layer_name = (
            results_a[0][op_idx][0]
            if model_name_to_use_for_layer_names == model_name_a
            else results_b[0][op_idx][0])
values_a = [results_a[forward_idx][op_idx][3]
for forward_idx in range(len(results_a))]
values_b = [results_b[forward_idx][op_idx][3]
for forward_idx in range(len(results_b))]
node_output = {
model_name_a: [{
'type': 'node_output',
'values': values_a,
'ref_node_target_type': str(results_a[0][op_idx][2]),
'fqn': str(results_a[0][op_idx][1]),
'index_of_arg': 0,
'index_within_arg': 0,
}],
model_name_b: [{
'type': 'node_output',
'values': values_b,
'ref_node_target_type': str(results_b[0][op_idx][2]),
'fqn': str(results_b[0][op_idx][1]),
'index_of_arg': 0,
'index_within_arg': 0,
}],
}
results[layer_name] = {
'node_output': node_output,
}
return results
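# --- Illustrative usage sketch (hypothetical inputs, not part of the original module) ---
# `model_fp32_dbr` and `model_int8_dbr` below are assumed to be models that were
# already prepared/converted with the define-by-run (DBR) quantization prototype,
# so that they contain AutoQuantizationState submodules for the loggers to hook into.
#
#   m_a, m_b = add_loggers('fp32', model_fp32_dbr, 'int8', model_int8_dbr)
#   m_a(example_input)
#   m_b(example_input)
#   results = extract_logger_info(
#       m_a, m_b, model_name_to_use_for_layer_names='int8')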
|
pytorch-master
|
torch/ao/ns/_numeric_suite_dbr.py
|
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
from torch.ao.quantization import prepare
from typing import Dict, List, Optional, Any, Union, Callable, Set
from torch.ao.quantization.quantization_mappings import (
get_default_compare_output_module_list,
)
NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST = {
nnqd.Linear,
nnq.Linear,
nnqd.LSTM,
nn.LSTM,
}
def _find_match(
str_list: Union[Dict[str, Any], List[str]], key_str: str,
postfix: str,
) -> Optional[str]:
split_str = key_str.split(".")
if split_str[-1] == postfix:
match_string = "".join(key_str.split(".")[0:-1])
for s2 in str_list:
pattern1 = "".join(s2.split(".")[0:-1])
pattern2 = "".join(s2.split(".")[0:-2])
if match_string == pattern1:
return s2
if match_string == pattern2:
return s2
# For matching "fc.weight" and "fc._packed_params._packed_params"
if postfix == "_packed_params":
match_string = "".join(key_str.split(".")[0:-2])
if len(match_string) == 0:
return None
for s2 in str_list:
pattern1 = "".join(s2.split(".")[0:-1])
pattern2 = "".join(s2.split(".")[0:-2])
if match_string == pattern1:
return s2
if match_string == pattern2:
return s2
return None
else:
return None
def compare_weights(
float_dict: Dict[str, Any], quantized_dict: Dict[str, Any]
) -> Dict[str, Dict[str, torch.Tensor]]:
r"""Compare the weights of the float module with its corresponding quantized
module. Return a dict with key corresponding to module names and each entry being
a dictionary with two keys 'float' and 'quantized', containing the float and
quantized weights. This dict can be used to compare and compute the quantization
error of the weights of float and quantized models.
Example usage::
wt_compare_dict = compare_weights(
float_model.state_dict(), qmodel.state_dict())
for key in wt_compare_dict:
print(
key,
compute_error(
wt_compare_dict[key]['float'],
wt_compare_dict[key]['quantized'].dequantize()
)
)
Args:
float_dict: state dict of the float model
quantized_dict: state dict of the quantized model
Return:
weight_dict: dict with key corresponding to module names and each entry being
a dictionary with two keys 'float' and 'quantized', containing the float and
quantized weights
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_weights")
weight_dict: Dict[str, Dict] = {}
for key in quantized_dict:
match_key = _find_match(float_dict, key, "weight")
if match_key is not None:
weight_dict[key] = {}
weight_dict[key]["float"] = float_dict[match_key]
weight_dict[key]["quantized"] = quantized_dict[key]
continue
# For matching "fc.weight" and "fc._packed_params._packed_params"
match_key = _find_match(float_dict, key, "_packed_params")
if match_key is not None:
weight_dict[key] = {}
weight_dict[key]["float"] = float_dict[match_key]
weight_dict[key]["quantized"] = quantized_dict[key][0]
# For LSTM
split_str = key.split(".")
if split_str[-1] == "param" and split_str[-3] == "_all_weight_values":
layer = split_str[-2]
module_name = ".".join(split_str[:-3])
float_weight_ih_key = module_name + ".weight_ih_l" + layer
float_weight_hh_key = module_name + ".weight_hh_l" + layer
if float_weight_ih_key in float_dict and float_weight_hh_key in float_dict:
weight_dict[key] = {}
weight_dict[key]["float"] = float_dict[float_weight_ih_key]
weight_dict[key]["quantized"] = (
quantized_dict[key].__getstate__()[0][4][0].__getstate__()[0][0]
)
weight_dict[key]["float"] = float_dict[float_weight_hh_key]
weight_dict[key]["quantized"] = (
quantized_dict[key].__getstate__()[0][4][1].__getstate__()[0][0]
)
return weight_dict
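# The docstring example above refers to a `compute_error` helper that is not
# defined in this file; a minimal SQNR-style sketch (an assumption, not the
# numeric suite's own definition) could look like:
#
#   def compute_error(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
#       Ps = torch.norm(x) ** 2           # signal power
#       Pn = torch.norm(x - y) ** 2       # quantization-noise power
#       return 10 * torch.log10(Ps / Pn)  # SQNR in dB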
def _get_logger_dict_helper(
mod: nn.Module, target_dict: Dict[str, Any],
prefix: str = "",
) -> None:
r"""This is the helper function for get_logger_dict
Args:
mod: module we want to save all logger stats
prefix: prefix for the current module
target_dict: the dictionary used to save all logger stats
"""
def get_prefix(prefix):
return prefix if prefix == "" else prefix + "."
for name, child in mod.named_children():
if isinstance(child, Logger):
target_dict[get_prefix(prefix) + "stats"] = child.stats
break
for name, child in mod.named_children():
module_prefix = get_prefix(prefix) + name if prefix else name
_get_logger_dict_helper(child, target_dict, module_prefix)
def get_logger_dict(mod: nn.Module, prefix: str = "") -> Dict[str, Dict]:
r"""Traverse the modules and save all logger stats into target dict.
This is mainly used for quantization accuracy debug.
Type of loggers supported:
ShadowLogger: used to log the outputs of the quantized module and its matching float shadow module,
OutputLogger: used to log the outputs of the modules
Args:
mod: module we want to save all logger stats
prefix: prefix for the current module
Return:
target_dict: the dictionary used to save all logger stats
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite.get_logger_dict")
target_dict: Dict[str, Dict] = {}
_get_logger_dict_helper(mod, target_dict, prefix)
return target_dict
class Logger(nn.Module):
r"""Base class for stats logging
"""
def __init__(self):
super(Logger, self).__init__()
self.stats = {}
# We only insert observer if the op is quantized with static quantization,
# which is identified by activation_observer.dtype == quint8. This is needed
# when attaching Logger as observer for FX mode
self.dtype = torch.quint8
def forward(self, x):
"""
""" # blank docblock to make autodoc happy
pass
class ShadowLogger(Logger):
r"""Class used in Shadow module to record the outputs of the original and
shadow modules.
"""
def __init__(self):
super(ShadowLogger, self).__init__()
self.stats["float"] = []
self.stats["quantized"] = []
def forward(self, x, y):
"""
""" # blank docblock to make autodoc happy
if len(x) > 1:
x = x[0]
if len(y) > 1:
y = y[0]
self.stats["quantized"].append(x.detach())
self.stats["float"].append(y.detach())
class OutputLogger(Logger):
r"""Class used to log the outputs of the module
"""
def __init__(self):
super(OutputLogger, self).__init__()
self.stats["tensor_val"] = []
def forward(self, x):
"""
""" # blank docblock to make autodoc happy
self.stats["tensor_val"].append(x)
return x
def _convert_tuple_to_list(t: Any) -> Any:
return list(_convert_tuple_to_list(x) for x in t) if type(t) is tuple else t
def _dequantize_tensor_list(t: Any) -> Any:
return (
list(_dequantize_tensor_list(x) for x in t)
if type(t) is list
else t.dequantize()
if t.is_quantized
else t
)
class Shadow(nn.Module):
r"""Shadow module attaches the float module to its matching quantized module
as the shadow. Then it uses Logger module to process the outputs of both
modules.
Args:
q_module: module quantized from float_module that we want to shadow
float_module: float module used to shadow q_module
logger_cls: type of logger used to process the outputs of q_module and
float_module. ShadowLogger or custom loggers can be used.
"""
def __init__(self, q_module, float_module, logger_cls):
super(Shadow, self).__init__()
self.orig_module = q_module
self.shadow_module = float_module
self.dequant = nnq.DeQuantize()
self.logger = logger_cls()
def forward(self, *x) -> torch.Tensor:
"""
""" # blank docblock to make autodoc happy
xl = _convert_tuple_to_list(x)
output = self.orig_module(*xl)
xl_float = _dequantize_tensor_list(xl)
shadow_output = self.shadow_module(*xl_float)
self.logger(output, shadow_output)
return output
def add(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
""" # blank docblock to make autodoc happy
output = self.orig_module.add(x, y)
x = x.dequantize()
y = y.dequantize()
shadow_output = self.shadow_module.add(x, y)
self.logger(output, shadow_output)
return output
def add_scalar(self, x: torch.Tensor, y: float) -> torch.Tensor:
"""
""" # blank docblock to make autodoc happy
output = self.orig_module.add_scalar(x, y)
x = x.dequantize()
shadow_output = self.shadow_module.add_scalar(x, y)
self.logger(output, shadow_output)
return output
def mul(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
""" # blank docblock to make autodoc happy
output = self.orig_module.mul(x, y)
x = x.dequantize()
y = y.dequantize()
shadow_output = self.shadow_module.mul(x, y)
self.logger(output, shadow_output)
return output
def mul_scalar(self, x: torch.Tensor, y: float) -> torch.Tensor:
"""
""" # blank docblock to make autodoc happy
output = self.orig_module.mul_scalar(x, y)
x = x.dequantize()
shadow_output = self.shadow_module.mul_scalar(x, y)
self.logger(output, shadow_output)
return output
def cat(self, x: List[torch.Tensor], dim: int = 0) -> torch.Tensor:
"""
""" # blank docblock to make autodoc happy
output = self.orig_module.cat(x, dim)
x = [y.dequantize() for y in x]
shadow_output = self.shadow_module.cat(x, dim)
self.logger(output, shadow_output)
return output
def add_relu(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
""" # blank docblock to make autodoc happy
output = self.orig_module.add_relu(x, y)
x = x.dequantize()
y = y.dequantize()
shadow_output = self.shadow_module.add_relu(x, y)
self.logger(output, shadow_output)
return output
def prepare_model_with_stubs(
float_module: nn.Module, q_module: nn.Module,
module_swap_list: Set[type], logger_cls: Callable,
) -> None:
r"""Prepare the model by attaching the float module to its matching quantized
module as the shadow if the float module type is in module_swap_list.
Example usage::
prepare_model_with_stubs(float_model, q_model, module_swap_list, Logger)
q_model(data)
ob_dict = get_logger_dict(q_model)
Args:
float_module: float module used to generate the q_module
q_module: module quantized from float_module
module_swap_list: list of float module types to attach the shadow
logger_cls: type of logger to be used in shadow module to process the outputs of
quantized module and its float shadow module
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite.prepare_model_with_stubs")
float_module_children = {}
for name, mod in float_module.named_children():
float_module_children[name] = mod
reassign = {}
for name, mod in q_module.named_children():
if name not in float_module_children:
continue
float_mod = float_module_children[name]
if type(float_mod) not in module_swap_list:
prepare_model_with_stubs(float_mod, mod, module_swap_list, logger_cls)
# Insert shadow module only if the module is not of the same type as
# the floating point module
if type(float_mod) in module_swap_list and not _is_identical_module_type(mod, float_mod):
reassign[name] = Shadow(mod, float_mod, logger_cls)
for key, value in reassign.items():
q_module._modules[key] = value
def _is_identical_module_type(mod1, mod2):
    # Compare whether the two modules consist of the same sequence of submodule types
mod1_module_types = [type(mod) for mod in mod1.modules()]
mod2_module_types = [type(mod) for mod in mod2.modules()]
return mod1_module_types == mod2_module_types
def compare_model_stub(
float_model: nn.Module, q_model: nn.Module, module_swap_list: Set[type],
*data, logger_cls=ShadowLogger
) -> Dict[str, Dict]:
r"""Compare quantized module in a model with its floating point counterpart,
feeding both of them the same input. Return a dict with key corresponding to
module names and each entry being a dictionary with two keys 'float' and
'quantized', containing the output tensors of quantized and its matching
float shadow module. This dict can be used to compare and compute the module
level quantization error.
    This function first calls prepare_model_with_stubs() to swap each quantized
    module that we want to compare with a Shadow module, which takes the
    quantized module, its corresponding float module, and a logger as input and
    creates an internal forward path so that the float module shadows the
    quantized module on the same input. The logger is customizable; the default
    ShadowLogger saves the outputs of the quantized module and the float module,
    which can be used to compute the module-level quantization error.
Example usage::
module_swap_list = [torchvision.models.quantization.resnet.QuantizableBasicBlock]
ob_dict = compare_model_stub(float_model,qmodel,module_swap_list, data)
for key in ob_dict:
print(key, compute_error(ob_dict[key]['float'], ob_dict[key]['quantized'].dequantize()))
Args:
float_model: float model used to generate the q_model
q_model: model quantized from float_model
module_swap_list: list of float module types at which shadow modules will
be attached.
data: input data used to run the prepared q_model
logger_cls: type of logger to be used in shadow module to process the outputs of
quantized module and its float shadow module
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_model_stub")
prepare_model_with_stubs(float_model, q_model, module_swap_list, logger_cls)
q_model(*data)
ob_dict = get_logger_dict(q_model)
return ob_dict
def get_matching_activations(
float_module: nn.Module, q_module: nn.Module,
) -> Dict[str, Dict[str, torch.Tensor]]:
r"""Find the matching activation between float and quantized modules.
Args:
float_module: float module used to generate the q_module
q_module: module quantized from float_module
Return:
act_dict: dict with key corresponding to quantized module names and each
entry being a dictionary with two keys 'float' and 'quantized', containing
the matching float and quantized activations
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite.get_matching_activations")
float_dict = get_logger_dict(float_module)
quantized_dict = get_logger_dict(q_module)
act_dict: Dict[str, Dict] = {}
for key in quantized_dict:
if len(quantized_dict[key]["tensor_val"]) == 0:
continue
match_key = _find_match(sorted(float_dict, reverse=True), key, "stats")
if match_key is not None:
act_dict[key] = {}
act_dict[key]["float"] = float_dict[match_key]["tensor_val"]
act_dict[key]["quantized"] = quantized_dict[key]["tensor_val"]
return act_dict
def prepare_model_outputs(
float_module: nn.Module,
q_module: nn.Module,
logger_cls=OutputLogger,
allow_list=None
) -> None:
r"""Prepare the model by attaching the logger to both float module
and quantized module if they are in the allow_list.
Args:
float_module: float module used to generate the q_module
q_module: module quantized from float_module
logger_cls: type of logger to be attached to float_module and q_module
allow_list: list of module types to attach logger
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite.prepare_model_outputs")
if allow_list is None:
allow_list = get_default_compare_output_module_list()
qconfig_debug = torch.ao.quantization.QConfig(activation=logger_cls, weight=None)
float_module.qconfig = qconfig_debug # type: ignore[assignment]
prepare(float_module, inplace=True, allow_list=allow_list, prepare_custom_config_dict={})
q_module.qconfig = qconfig_debug # type: ignore[assignment]
prepare(
q_module,
inplace=True,
allow_list=allow_list,
observer_non_leaf_module_list=NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST,
prepare_custom_config_dict={}
)
def compare_model_outputs(
float_model: nn.Module,
q_model: nn.Module,
*data,
logger_cls=OutputLogger,
allow_list=None
) -> Dict[str, Dict[str, torch.Tensor]]:
r"""Compare output activations between float and quantized models at
corresponding locations for the same input. Return a dict with key corresponding
to quantized module names and each entry being a dictionary with two keys
'float' and 'quantized', containing the activations of quantized model and
float model at matching locations. This dict can be used to compare and
compute the propagation quantization error.
Example usage::
act_compare_dict = compare_model_outputs(float_model, qmodel, data)
for key in act_compare_dict:
print(
key,
compute_error(
act_compare_dict[key]['float'],
act_compare_dict[key]['quantized'].dequantize()
)
)
Args:
float_model: float model used to generate the q_model
q_model: model quantized from float_model
data: input data used to run the prepared float_model and q_model
logger_cls: type of logger to be attached to float_module and q_module
allow_list: list of module types to attach logger
Return:
act_compare_dict: dict with key corresponding to quantized module names
and each entry being a dictionary with two keys 'float' and 'quantized',
containing the matching float and quantized activations
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite.compare_model_outputs")
if allow_list is None:
allow_list = get_default_compare_output_module_list()
prepare_model_outputs(float_model, q_model, logger_cls, allow_list)
float_model(*data)
q_model(*data)
act_compare_dict = get_matching_activations(float_model, q_model)
return act_compare_dict
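# --- Illustrative end-to-end sketch (not part of the original module) ---
# Quantize a tiny model with the eager-mode flow and compare weights and
# activations with the APIs above. The model, qconfig, and calibration data
# are assumptions for illustration; a quantized backend such as fbgemm must be
# available for prepare/convert to work.
if __name__ == "__main__":
    import copy
    import torch.ao.quantization as tq
    class _TinyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.quant = tq.QuantStub()
            self.fc = nn.Linear(4, 4)
            self.dequant = tq.DeQuantStub()
        def forward(self, x):
            return self.dequant(self.fc(self.quant(x)))
    float_model = _TinyModel().eval()
    float_model.qconfig = tq.default_qconfig
    prepared = tq.prepare(copy.deepcopy(float_model))
    prepared(torch.randn(2, 4))          # calibrate
    q_model = tq.convert(prepared)
    # module-level weight comparison
    wt_cmp = compare_weights(float_model.state_dict(), q_model.state_dict())
    # activation comparison at matching locations
    act_cmp = compare_model_outputs(float_model, q_model, torch.randn(2, 4))
    print(sorted(wt_cmp.keys()), sorted(act_cmp.keys()))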
|
pytorch-master
|
torch/ao/ns/_numeric_suite.py
|
pytorch-master
|
torch/ao/ns/__init__.py
|
|
"""
This module contains tooling to compare weights and activations
across models. Example usage::
import copy
import torch
import torch.quantization.quantize_fx as quantize_fx
import torch.ao.ns._numeric_suite_fx as ns
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1)).eval()
mp = quantize_fx.prepare_fx(m, {'': torch.quantization.default_qconfig})
# We convert a copy because we need the original prepared model
# to be available for comparisons, and `quantize_fx.convert_fx` is inplace.
mq = quantize_fx.convert_fx(copy.deepcopy(mp))
#
# Comparing weights
#
# extract weight pairs
weight_comparison = ns.extract_weights('a', mp, 'b', mq)
# add SQNR for each comparison, inplace
ns.extend_logger_results_with_comparison(
weight_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr,
'sqnr')
# weight_comparison contains the weights from `mp` and `mq` stored
# in pairs, and can be used for further analysis.
#
# Comparing activations, with error propagation
#
# add loggers
mp_ns, mq_ns = ns.add_loggers(
'a', copy.deepcopy(mp),
'b', copy.deepcopy(mq),
ns.OutputLogger)
# send an example datum to capture intermediate activations
datum = torch.randn(1, 1, 1, 1)
mp_ns(datum)
mq_ns(datum)
# extract intermediate activations
act_comparison = ns.extract_logger_info(
mp_ns, mq_ns, ns.OutputLogger, 'b')
# add SQNR for each comparison, inplace
ns.extend_logger_results_with_comparison(
act_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr,
'sqnr')
# act_comparison contains the activations from `mp_ns` and `mq_ns` stored
# in pairs, and can be used for further analysis.
#
# Comparing activations, without error propagation
#
# create shadow model
mp_shadows_mq = ns.add_shadow_loggers(
'a', copy.deepcopy(mp),
'b', copy.deepcopy(mq),
ns.OutputLogger)
# send an example datum to capture intermediate activations
datum = torch.randn(1, 1, 1, 1)
mp_shadows_mq(datum)
# extract intermediate activations
shadow_act_comparison = ns.extract_shadow_logger_info(
mp_shadows_mq, ns.OutputLogger, 'b')
# add SQNR for each comparison, inplace
ns.extend_logger_results_with_comparison(
shadow_act_comparison, 'a', 'b', torch.ao.ns.fx.utils.compute_sqnr,
'sqnr')
# shadow_act_comparison contains the activations from `mp_ns` and `mq_ns` stored
# in pairs, and can be used for further analysis.
"""
import collections
import torch
import torch.nn as nn
import torch.ao.quantization.quantize_fx as quantize_fx
from torch.fx import GraphModule
from torch.fx.graph import Node
from torch.ao.ns.fx.mappings import (
get_base_name_to_sets_of_related_ops,
)
from torch.ao.ns.fx.graph_matcher import (
get_matching_subgraph_pairs,
get_type_a_related_to_b,
)
from .fx.weight_utils import (
extract_weight_from_node,
)
from .fx.graph_passes import (
add_loggers_to_model,
create_a_shadows_b,
)
from .fx.utils import (
rekey_logger_info_on_node_name_of_model,
maybe_add_missing_fqns,
get_target_type_str,
)
from .fx.ns_types import (
NSSingleResultValuesType,
NSResultsType,
NSNodeTargetType,
)
from typing import Dict, Tuple, Callable, List, Optional, Set
RNNReturnType = Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]
class OutputLogger(nn.Module):
"""
Base class for capturing intermediate values.
"""
stats: List[torch.Tensor]
stats_rnn: List[RNNReturnType]
# Mark as impure so that calls to it will not be removed during DCE.
_is_impure = True
def __init__(
self,
ref_node_name: str,
prev_node_name: str,
model_name: str,
ref_name: str,
prev_node_target_type: str,
ref_node_target_type: str,
results_type: str,
index_within_arg: int,
index_of_arg: int,
fqn: Optional[str],
):
super().__init__()
self.stats: List[torch.Tensor] = []
self.stats_rnn: List[RNNReturnType] = []
# name of the node which was responsible for adding this logger
# Note:
# - if we are logging node outputs, this is the same as prev_node_name
# - if we are logging node inputs, this is the name of the node
# whose input this logger is logging.
#
# example, where logger1 is logging input of op1 and logger2 is logging
# the output of op1:
#
# x1 -> logger1 -> op1 -> logger2 -> x2
#
# in this example,
# - logger1's prev_node_name is x1 and ref_node_name is op1
# - logger2's prev_node_name is op1 and ref_node_name is op1
self.ref_node_name = ref_node_name
# name of the node whose output this Logger is capturing
self.prev_node_name = prev_node_name
# name of the model from which the node originated from
self.model_name = model_name
# reference name, used to match loggers from separate models
# to each other
self.ref_name = ref_name
# type of the target of the node whose output this logger is logging
self.prev_node_target_type = prev_node_target_type
        # type of the target of the node which was responsible for adding this
# logger
self.ref_node_target_type = ref_node_target_type
# what kind of values are inside of stats
self.results_type = results_type
# index of this node within the arg of the input/output node
# for example, in cat([x1, x2, x3], dim=0), x2 would have index_within_arg == 1
self.index_within_arg = index_within_arg
# index of this node within the args of the input/output node
# for example, in add(x1, x2), x2 would have index_of_arg == 1
self.index_of_arg = index_of_arg
# fully qualified name
self.fqn = fqn
# Note: cannot annotate the type of x because TorchScript does not support
# the Union type.
def forward(self, x):
"""
""" # blank docblock to make autodoc happy
if isinstance(x, torch.Tensor):
self.stats.append(x.detach())
elif isinstance(x, tuple) and len(x) == 2 and len(x[1]) == 2:
new_res = (x[0].detach(), (x[1][0].detach(), x[1][1].detach()))
self.stats_rnn.append(new_res)
return x
def __repr__(self):
return f"""OutputLogger(ref_name={self.ref_name}, model_name={self.model_name},
prev_node_name={self.prev_node_name}, ref_node_name={self.ref_node_name},
ref_node_target_type={self.ref_node_target_type}
results_type={self.results_type}, index_within_arg={self.index_within_arg},
index_of_arg={self.index_of_arg}, fqn={self.fqn})"""
class NSTracer(quantize_fx.QuantizationTracer):
"""
Just like a regular FX quantization tracer, but treats observers and fake_quantize
modules as leaf modules.
"""
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
"""
""" # blank docblock to make autodoc happy
if isinstance(m, torch.ao.quantization.ObserverBase):
return True
elif isinstance(m, torch.ao.quantization.FakeQuantizeBase):
return True
return super().is_leaf_module(m, module_qualified_name)
def _extract_weights_one_model(
model_name: str,
model: GraphModule,
nodes_and_names_to_instrument: List[Tuple[Node, str]],
results: NSResultsType,
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> None:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_one_model")
for node, ref_name in nodes_and_names_to_instrument:
res_type = NSSingleResultValuesType.WEIGHT.value
extracted_weight = extract_weight_from_node(
node, model, op_to_type_to_weight_extraction_fn)
if extracted_weight:
if ref_name not in results:
results[ref_name] = {res_type: {}}
results[ref_name][res_type][model_name] = [extracted_weight]
def _extract_weights_impl(
model_name_a: str,
gm_a: GraphModule,
model_name_b: str,
gm_b: GraphModule,
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> NSResultsType:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_impl")
matched_subgraph_pairs = get_matching_subgraph_pairs(
gm_a, gm_b, base_name_to_sets_of_related_ops,
unmatchable_types_map)
# split the subgraph pairs into one data structure for each model
nodes_and_names_to_instrument_a: List[Tuple[Node, str]] = []
nodes_and_names_to_instrument_b: List[Tuple[Node, str]] = []
for match_name, match in matched_subgraph_pairs.items():
subgraph_a, subgraph_b = match
nodes_and_names_to_instrument_a.append((subgraph_a.base_op_node, match_name))
nodes_and_names_to_instrument_b.append((subgraph_b.base_op_node, match_name))
# populate the results, one model at a time
results: NSResultsType = {}
_extract_weights_one_model(
model_name_a, gm_a, nodes_and_names_to_instrument_a, results,
op_to_type_to_weight_extraction_fn)
_extract_weights_one_model(
model_name_b, gm_b, nodes_and_names_to_instrument_b, results,
op_to_type_to_weight_extraction_fn)
# fill in missing fqn entries
maybe_add_missing_fqns(results)
# rekey on names of nodes in gm_b
results = rekey_logger_info_on_node_name_of_model(results, model_name_b)
return results
def extract_weights(
model_name_a: str,
model_a: nn.Module,
model_name_b: str,
model_b: nn.Module,
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> NSResultsType:
"""
Extract weights from model A and model B, and return a comparison.
Args:
model_name_a: string name of model A to use in results
model_a: model A
model_name_b: string name of model B to use in results
model_b: model B
base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change
unmatchable_types_map: optional override of unmatchable types, subject to change
op_to_type_to_weight_extraction_fn: optional override of function which extracts weight
from a type, subject to change
Return:
NSResultsType, containing the weight comparisons
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_weights")
if base_name_to_sets_of_related_ops is None:
base_name_to_sets_of_related_ops = \
get_base_name_to_sets_of_related_ops()
type_a_related_to_b = \
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
# TODO(future PR): expose these
skipped_module_names: List[str] = []
skipped_module_classes: List[Callable] = []
tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
gm_a = GraphModule(model_a, tracer_a.trace(model_a))
if hasattr(model_a, '_node_name_to_scope'):
gm_a._node_name_to_scope = model_a._node_name_to_scope
gm_b = GraphModule(model_b, tracer_b.trace(model_b))
if hasattr(model_b, '_node_name_to_scope'):
gm_b._node_name_to_scope = model_b._node_name_to_scope
return _extract_weights_impl(
model_name_a, gm_a, model_name_b, gm_b, base_name_to_sets_of_related_ops,
unmatchable_types_map, op_to_type_to_weight_extraction_fn)
def _add_loggers_one_model(
model_name: str,
model: GraphModule,
nodes_and_names_to_instrument_inputs: List[Tuple[Node, str, str]],
nodes_and_names_to_instrument_outputs: List[Tuple[Node, str, str]],
logger_cls: Callable,
) -> nn.Module:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_loggers_one_model")
# TODO(future PR): do not observe nodes we do not care
# about (both fp32, denylist, etc)
node_to_instrument_inputs_to_ref_name: Dict[Node, Tuple[str, str]] = {}
node_to_instrument_outputs_to_ref_name: Dict[Node, Tuple[str, str]] = {}
for node, ref_name, ref_node_type in nodes_and_names_to_instrument_inputs:
node_to_instrument_inputs_to_ref_name[node] = (ref_name, ref_node_type)
for node, ref_name, ref_node_type in nodes_and_names_to_instrument_outputs:
node_to_instrument_outputs_to_ref_name[node] = (ref_name, ref_node_type)
model = add_loggers_to_model(
model, node_to_instrument_inputs_to_ref_name,
node_to_instrument_outputs_to_ref_name, logger_cls, model_name)
return model
def _add_loggers_impl(
name_a: str,
gm_a: GraphModule,
name_b: str,
gm_b: GraphModule,
logger_cls: Callable,
should_log_inputs: bool,
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> Tuple[nn.Module, nn.Module]:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_loggers_impl")
matched_subgraph_pairs = get_matching_subgraph_pairs(
gm_a, gm_b,
base_name_to_sets_of_related_ops, unmatchable_types_map)
nodes_and_names_to_instrument_inputs_a = []
nodes_and_names_to_instrument_inputs_b = []
nodes_and_names_to_instrument_outputs_a = []
nodes_and_names_to_instrument_outputs_b = []
for match_name, (subgraph_a, subgraph_b) in matched_subgraph_pairs.items():
ref_node_type_a = get_target_type_str(subgraph_a.base_op_node, gm_a)
ref_node_type_b = get_target_type_str(subgraph_b.base_op_node, gm_b)
# Note: for matching inputs we use start_node, such as observing
# the input of linear in linear-relu
if should_log_inputs:
nodes_and_names_to_instrument_inputs_a.append(
(subgraph_a.start_node, match_name, ref_node_type_a))
nodes_and_names_to_instrument_inputs_b.append(
(subgraph_b.start_node, match_name, ref_node_type_b))
# Note: for matching activations we always use end_node,
# such as observing the output of relu in linear-relu
nodes_and_names_to_instrument_outputs_a.append(
(subgraph_a.end_node, match_name, ref_node_type_a))
nodes_and_names_to_instrument_outputs_b.append(
(subgraph_b.end_node, match_name, ref_node_type_b))
new_model_a = _add_loggers_one_model(
name_a, gm_a, nodes_and_names_to_instrument_inputs_a,
nodes_and_names_to_instrument_outputs_a, logger_cls)
new_model_b = _add_loggers_one_model(
name_b, gm_b, nodes_and_names_to_instrument_inputs_b,
nodes_and_names_to_instrument_outputs_b, logger_cls)
return (new_model_a, new_model_b)
def add_loggers(
name_a: str,
model_a: nn.Module,
name_b: str,
model_b: nn.Module,
logger_cls: Callable,
should_log_inputs : bool = False,
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> Tuple[nn.Module, nn.Module]:
"""
Instrument model A and model B with loggers.
Args:
        name_a: string name of model A to use in results
        model_a: model A
        name_b: string name of model B to use in results
        model_b: model B
        logger_cls: class of Logger to use
        should_log_inputs: whether to log inputs in addition to outputs
base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change
unmatchable_types_map: optional override of unmatchable types, subject to change
Return:
Returns a tuple of (model_a_with_loggers, model_b_with_loggers). Modifies both models inplace.
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.add_loggers")
# TODO(future PR): expose these
skipped_module_names: List[str] = []
skipped_module_classes: List[Callable] = []
tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
gm_a = GraphModule(model_a, tracer_a.trace(model_a))
if hasattr(model_a, '_node_name_to_scope'):
gm_a._node_name_to_scope = model_a._node_name_to_scope
gm_b = GraphModule(model_b, tracer_b.trace(model_b))
if hasattr(model_b, '_node_name_to_scope'):
gm_b._node_name_to_scope = model_b._node_name_to_scope
return _add_loggers_impl(
name_a, gm_a, name_b, gm_b, logger_cls,
should_log_inputs=should_log_inputs,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
unmatchable_types_map=unmatchable_types_map)
def _extract_logger_info_one_model(
model: nn.Module,
results: NSResultsType,
logger_cls: Callable,
) -> None:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_logger_info_one_model")
for gm_name, mod in model.named_modules():
# TODO(future PR): better check when scripted
is_logger = (
isinstance(mod, logger_cls) # type: ignore[arg-type]
or (
isinstance(mod, torch.jit.RecursiveScriptModule)
and mod.original_name == 'OutputLogger'
)
)
if is_logger:
key = mod.ref_name
if key not in results:
results[key] = {}
assert mod.model_name not in results[key], \
f"{mod.model_name} is already present in results"
if mod.results_type not in results[key]:
results[key][mod.results_type] = {}
if mod.model_name not in results[key][mod.results_type]:
results[key][mod.results_type][mod.model_name] = []
stats_to_use = mod.stats
if len(mod.stats_rnn) > 0:
stats_to_use = mod.stats_rnn
results[key][mod.results_type][mod.model_name].append({
'type': mod.results_type,
'values': stats_to_use,
'ref_node_name': mod.ref_node_name,
'ref_node_target_type': mod.ref_node_target_type,
'prev_node_name': mod.prev_node_name,
'prev_node_target_type': mod.prev_node_target_type,
'index_within_arg': mod.index_within_arg,
'index_of_arg': mod.index_of_arg,
'fqn': mod.fqn,
})
# ensure the list stays sorted
results[key][mod.results_type][mod.model_name].sort(
key=lambda res:
f"{res['index_of_arg']}:{res['index_within_arg']}"
)
# TODO(future PR): align on naming
# this is equivalent of just the comparison extraction part of `ns.compare_model_outputs`
def extract_logger_info(
model_a: nn.Module,
model_b: nn.Module,
logger_cls: Callable,
model_name_to_use_for_layer_names: str,
) -> NSResultsType:
"""
Traverse all loggers in `model_a` and `model_b`, and extract the logged
information.
Args:
model_a: model A
model_b: model B
logger_cls: class of Logger to use
model_name_to_use_for_layer_names: string name of model to use for
layer names in the output
Return:
NSResultsType, containing the logged comparisons
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_logger_info")
results: NSResultsType = {}
for model in (model_a, model_b):
_extract_logger_info_one_model(model, results, logger_cls)
# fill in missing fqn entries
maybe_add_missing_fqns(results)
# rekey on the name of model b
results = rekey_logger_info_on_node_name_of_model(
results, model_name_to_use_for_layer_names)
return results
def _add_shadow_loggers_impl(
name_a: str,
gm_a: GraphModule,
name_b: str,
gm_b: GraphModule,
logger_cls: Callable,
should_log_inputs: bool,
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> nn.Module:
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._add_shadow_loggers_impl")
matched_subgraph_pairs = get_matching_subgraph_pairs(
gm_a, gm_b, base_name_to_sets_of_related_ops,
unmatchable_types_map)
gm_a_shadows_b = create_a_shadows_b(
name_a, gm_a, name_b, gm_b, matched_subgraph_pairs, logger_cls,
should_log_inputs=should_log_inputs,
node_type_to_io_type_map=node_type_to_io_type_map)
return gm_a_shadows_b
def add_shadow_loggers(
name_a: str,
model_a: nn.Module,
name_b: str,
model_b: nn.Module,
logger_cls: Callable,
should_log_inputs: bool = False,
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> nn.Module:
"""
Instrument model A and model B with shadow loggers.
Args:
        name_a: string name of model A to use in results
        model_a: model A
        name_b: string name of model B to use in results
        model_b: model B
logger_cls: class of Logger to use
should_log_inputs: whether to log inputs
base_name_to_sets_of_related_ops: optional override of subgraph base nodes, subject to change
unmatchable_types_map: optional override of unmatchable types, subject to change
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.add_shadow_loggers")
# TODO(future PR): expose these
skipped_module_names: List[str] = []
skipped_module_classes: List[Callable] = []
tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
gm_a = GraphModule(model_a, tracer_a.trace(model_a))
if hasattr(model_a, '_node_name_to_scope'):
gm_a._node_name_to_scope = model_a._node_name_to_scope
gm_b = GraphModule(model_b, tracer_b.trace(model_b))
if hasattr(model_b, '_node_name_to_scope'):
gm_b._node_name_to_scope = model_b._node_name_to_scope
return _add_shadow_loggers_impl(
name_a, gm_a, name_b, gm_b, logger_cls,
should_log_inputs=should_log_inputs,
base_name_to_sets_of_related_ops=base_name_to_sets_of_related_ops,
node_type_to_io_type_map=node_type_to_io_type_map,
unmatchable_types_map=unmatchable_types_map)
def extract_shadow_logger_info(
model_a_shadows_b: nn.Module,
logger_cls: Callable,
model_name_to_use_for_layer_names: str,
) -> NSResultsType:
"""
Traverse all loggers in a shadow model, and extract the logged
information.
Args:
model_a_shadows_b: shadow model
logger_cls: class of Logger to use
model_name_to_use_for_layer_names: string name of model to use for
layer names in the output
Return:
NSResultsType, containing the logged comparisons
"""
torch._C._log_api_usage_once("quantization_api._numeric_suite_fx.extract_shadow_logger_info")
results: NSResultsType = collections.defaultdict(dict)
_extract_logger_info_one_model(model_a_shadows_b, results, logger_cls)
# fill in missing fqn entries
maybe_add_missing_fqns(results)
# rekey on the name of model b
results = rekey_logger_info_on_node_name_of_model(
results, model_name_to_use_for_layer_names)
return dict(results)
def extend_logger_results_with_comparison(
results: NSResultsType,
model_name_1: str,
model_name_2: str,
comparison_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
comparison_name: str,
) -> None:
"""
Compares the logged values from `model_name_2` against the corresponding
values in `model_name_1`, using `comparison_fn`. Records the result
in `model_name_2`'s results under `comparison_name`. Modifies `results` inplace.
Args:
results: the result data structure from `extract_logger_info` or
`extract_shadow_logger_info`.
model_name_1: string name of model 1
model_name_2: string name of model 2
comparison_fn: function to compare two Tensors
        comparison_name: string name under which to store the comparison
            results in `model_name_2`'s entries
"""
for _, results_type_to_results in results.items():
for _, model_name_to_results in results_type_to_results.items():
assert model_name_1 in model_name_to_results, \
f"{model_name_1} not found in results"
assert model_name_2 in model_name_to_results, \
f"{model_name_2} not found in results"
results_1 = model_name_to_results[model_name_1]
results_2 = model_name_to_results[model_name_2]
for result_2 in results_2:
index_within_arg_2 = result_2['index_within_arg']
index_of_arg_2 = result_2['index_of_arg']
# find corresponding result_1
result_1 = None
for cur_result_1 in results_1:
index_within_arg_1 = cur_result_1['index_within_arg']
index_of_arg_1 = cur_result_1['index_of_arg']
if (
(index_within_arg_1 == index_within_arg_2) and
(index_of_arg_1 == index_of_arg_2)
):
result_1 = cur_result_1
break
assert result_1 is not None
values_1 = result_1['values']
values_2 = result_2['values']
result_2[comparison_name] = []
for value_1, value_2 in zip(values_1, values_2):
comparison_result = comparison_fn(value_1, value_2)
result_2[comparison_name].append(comparison_result)
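# --- Illustrative sketch (hypothetical names, not part of the original module) ---
# Any callable mapping two tensors to a tensor can serve as `comparison_fn`.
# Assuming `act_comparison` was produced by extract_logger_info as in the module
# docstring above, a mean-absolute-difference comparison could be attached so:
#
#   def mean_abs_diff(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
#       a = a.dequantize() if a.is_quantized else a
#       b = b.dequantize() if b.is_quantized else b
#       return (a - b).abs().mean()
#
#   extend_logger_results_with_comparison(
#       act_comparison, 'a', 'b', mean_abs_diff, 'mean_abs_diff')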
|
pytorch-master
|
torch/ao/ns/_numeric_suite_fx.py
|
import torch
from torch.fx import GraphModule, map_arg
from torch.fx.graph import Graph, Node
from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix
from .utils import (
get_node_first_input_and_output_type,
getattr_from_fqn,
NodeInputOrOutputType,
return_first_non_observer_node,
get_number_of_non_param_args,
get_target_type_str,
get_arg_indices_of_inputs_to_log,
get_node_input_qparams,
op_type_supports_shadowing,
get_normalized_nth_input,
)
from .ns_types import (
NSSingleResultValuesType,
NSSubgraph,
NSNodeTargetType,
)
from torch.ao.ns.fx.mappings import (
get_node_type_to_io_type_map,
)
from torch.ao.quantization.quantize import is_activation_post_process
from typing import Dict, Tuple, Callable, List, Any, Union, Optional, Set
def _maybe_get_fqn(node: Node, gm: GraphModule) -> Optional[str]:
fqn = None
if hasattr(gm, '_node_name_to_scope'):
# fqn on observers is not present, because they do not
# exist when the fqns are created during tracing. If this is
# an observer, get the fqn of the node being observed.
node_to_use_for_fqn = node
if node.op == 'call_module':
assert isinstance(node.target, str)
module = getattr_from_fqn(gm, node.target)
if is_activation_post_process(module):
node_to_use_for_fqn = get_normalized_nth_input(node, gm, 0)
fqn = gm._node_name_to_scope[node_to_use_for_fqn.name][0] # type: ignore[index]
return fqn # type: ignore[return-value]
def _insert_logger_after_node(
node: Node,
gm: GraphModule,
logger_cls: Callable,
logger_node_name_suffix: str,
ref_node_name: str,
model_name: str,
ref_name: str,
ref_node_target_type: str,
results_type: str,
index_within_arg: int,
index_of_arg: int,
fqn: Optional[str],
) -> Node:
"""
Given a starting graph of
prev_node -> node -> next_node
This function creates a new logger_cls obj and adds it
after node, resulting in
prev_node -> node -> logger_obj -> next_node
"""
# create new name
logger_node_name = \
get_new_attr_name_with_prefix(node.name + logger_node_name_suffix)(gm)
target_type = get_target_type_str(node, gm)
# create the logger object
logger_obj = logger_cls(
ref_node_name, node.name, model_name, ref_name, target_type,
ref_node_target_type,
results_type, index_within_arg, index_of_arg, fqn)
# attach the logger object to the parent module
setattr(gm, logger_node_name, logger_obj)
logger_node = node.graph.create_node(
'call_module', logger_node_name, (node,), {})
return logger_node
def add_loggers_to_model(
gm: GraphModule,
node_to_instrument_inputs_to_ref_node_name: Dict[Node, Tuple[str, str]],
node_to_instrument_outputs_to_ref_node_name: Dict[Node, Tuple[str, str]],
logger_cls: Callable,
model_name: str,
) -> GraphModule:
"""
Takes the graph of gm, adds loggers to the output
of each node in nodes_to_instrument. Returns a GraphModule with the new
graph.
"""
new_graph = Graph()
env: Dict[str, Any] = {}
modules = dict(gm.named_modules())
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
for node in gm.graph.nodes:
if node.op == 'output':
new_graph.output(map_arg(get_normalized_nth_input(node, gm, 0), load_arg))
continue
if (
(node in node_to_instrument_inputs_to_ref_node_name) or
(node in node_to_instrument_outputs_to_ref_node_name)
):
fqn = _maybe_get_fqn(node, gm)
if node in node_to_instrument_inputs_to_ref_node_name:
ref_name, ref_node_type = node_to_instrument_inputs_to_ref_node_name[node]
                # Ops such as add and mul are special because either
# one or two of the first two arguments can be tensors,
# and if one argument is a tensor it can be first or
# second (x + 1 versus 1 + x).
arg_indices_to_log = get_arg_indices_of_inputs_to_log(node)
for node_arg_idx in arg_indices_to_log:
node_arg = get_normalized_nth_input(node, gm, node_arg_idx)
if type(node_arg) == Node:
# create a single input logger
prev_node = env[node_arg.name]
env[node_arg.name] = _insert_logger_after_node(
prev_node, gm, logger_cls, '_ns_logger_', node.name,
model_name, ref_name, ref_node_type,
NSSingleResultValuesType.NODE_INPUT.value,
index_within_arg=0, index_of_arg=node_arg_idx,
fqn=fqn)
elif type(node_arg) == torch.fx.immutable_collections.immutable_list:
# create N input loggers, one for each node
for arg_idx, arg in enumerate(node_arg): # type: ignore[var-annotated, arg-type]
prev_node = env[arg.name]
env[prev_node.name] = _insert_logger_after_node(
prev_node, gm, logger_cls, '_ns_logger_', node.name,
model_name, ref_name, ref_node_type,
NSSingleResultValuesType.NODE_INPUT.value,
index_within_arg=arg_idx, index_of_arg=node_arg_idx,
fqn=fqn)
else:
pass
# ensure env is populated with base node
# Note: runs for both inputs and outputs
env[node.name] = new_graph.node_copy(node, load_arg)
if node in node_to_instrument_outputs_to_ref_node_name:
ref_name, ref_node_type = node_to_instrument_outputs_to_ref_node_name[node]
# add the logger after the base node
env[node.name] = _insert_logger_after_node(
env[node.name], gm, logger_cls, '_ns_logger_', node.name,
model_name, ref_name, ref_node_type,
NSSingleResultValuesType.NODE_OUTPUT.value,
index_within_arg=0, index_of_arg=0, fqn=fqn)
else:
env[node.name] = new_graph.node_copy(node, load_arg)
new_gm = GraphModule(gm, new_graph)
return new_gm
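# Illustrative usage sketch (hypothetical names: _MinimalLogger, _DemoModel,
# _demo_add_loggers). In the real workflow a logger class such as the Numeric
# Suite's OutputLogger is passed in; here we only assume a constructor that is
# compatible with the ten positional arguments `_insert_logger_after_node`
# passes when it instantiates the logger.
def _demo_add_loggers():
    import torch.nn as nn
    from torch.fx import symbolic_trace
    class _MinimalLogger(nn.Module):
        def __init__(self, ref_node_name, prev_node_name, model_name, ref_name,
                     prev_node_target_type, ref_node_target_type, results_type,
                     index_within_arg, index_of_arg, fqn):
            super().__init__()
            self.stats = []
        def forward(self, x):
            self.stats.append(x.detach())
            return x
    class _DemoModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(4, 4)
        def forward(self, x):
            return self.linear(x)
    gm = symbolic_trace(_DemoModel())
    linear_node = next(n for n in gm.graph.nodes
                       if n.op == 'call_module' and n.target == 'linear')
    instrumented = add_loggers_to_model(
        gm,
        node_to_instrument_inputs_to_ref_node_name={},
        node_to_instrument_outputs_to_ref_node_name={
            linear_node: ('linear', 'nn.Linear')},
        logger_cls=_MinimalLogger,
        model_name='demo_model',
    )
    # running the instrumented model populates the logger's stats
    instrumented(torch.randn(2, 4))
    return instrumented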
def _insert_quantize_per_tensor_node(
prev_node_c: Node,
node_a: Node,
gm_b: GraphModule,
graph_c: Graph,
scale: Union[torch.Tensor, float],
zero_point: Union[torch.Tensor, int],
dtype_cast_name: str,
) -> Node:
# copy scale
scale_node_name = \
get_new_attr_name_with_prefix(
node_a.name + '_input_scale_')(gm_b)
setattr(gm_b, scale_node_name, scale)
scale_node = graph_c.create_node(
'get_attr', scale_node_name, (), {}, scale_node_name)
# copy zero_point
zero_point_node_name = \
get_new_attr_name_with_prefix(
node_a.name + '_input_zero_point_')(gm_b)
setattr(gm_b, zero_point_node_name, zero_point)
zero_point_node = graph_c.create_node(
'get_attr', zero_point_node_name, (), {}, zero_point_node_name)
# create the quantize_per_tensor call
return graph_c.create_node(
'call_function', torch.quantize_per_tensor,
(prev_node_c, scale_node, zero_point_node, torch.quint8), {},
dtype_cast_name)
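# Illustrative note (hypothetical helper _demo_quantize_per_tensor): the node
# created above executes the eager op torch.quantize_per_tensor at runtime.
# A minimal eager-mode sketch of the same call, with made-up qparams:
def _demo_quantize_per_tensor():
    x = torch.randn(2, 2)
    # args: input, scale, zero_point, dtype
    xq = torch.quantize_per_tensor(x, 0.1, 0, torch.quint8)
    # dequantize recovers an fp32 tensor approximating x
    return xq.dequantize()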
def _insert_dtype_cast_after_node(
node_a: Node,
node_c: Node,
prev_node_c: Union[Node, List[Node]],
gm_a: GraphModule,
gm_b: GraphModule,
graph_c: Graph,
node_name_prefix: str,
logger_cls: Callable,
node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
) -> Union[Node, List[Node]]:
"""
Given a starting graph C (derived from graph B) of
... -> prev_node_c -> node_c -> ...
And a corresponding related node_a, inserts the correct dtype
cast node after prev_node_c to cast into the dtype expected
by node_a, resulting in:
dtype_cast
/
... -> prev_node_c -> node_c -> ...
For example, if node_c is an int8 op and node_a is an fp32 op, this function
will insert a dequant.
"""
dtype_cast_op = None
dtype_cast_mod_cls = None
dtype_cast_method = None
dtype_cast_method_dtype = None
dtype_cast_scale = None
dtype_cast_zero_point = None
node_input_type_a, _node_output_type_a = \
get_node_first_input_and_output_type(
node_a, gm_a, logger_cls, node_type_to_io_type_map)
node_input_type_c, _node_output_type_c = \
get_node_first_input_and_output_type(
node_c, gm_b, logger_cls, node_type_to_io_type_map)
if (
(node_input_type_a == NodeInputOrOutputType.FP32 and
node_input_type_c == NodeInputOrOutputType.INT8) or
(node_input_type_a == NodeInputOrOutputType.FP32 and
node_input_type_c == NodeInputOrOutputType.FP16) or
# TODO(future PR): determine the actual dtype of node_c,
# the current code only works because dequantize works with
# multiple input dtypes.
(node_input_type_a == NodeInputOrOutputType.FP32 and
node_input_type_c == NodeInputOrOutputType.FP32_OR_INT8)
):
dtype_cast_op = torch.dequantize
elif (
node_input_type_a == node_input_type_c and
node_input_type_a != NodeInputOrOutputType.UNKNOWN
):
dtype_cast_mod_cls = torch.nn.Identity
elif (
node_input_type_a == NodeInputOrOutputType.INT8 and
node_input_type_c == NodeInputOrOutputType.FP32
):
# int8 shadows fp32, the dtype cast needs to quantize to int8
# with the right qparams.
node_a_input_qparams = get_node_input_qparams(
node_a, gm_a, node_type_to_io_type_map)
if node_a_input_qparams is not None:
dtype_cast_op = torch.quantize_per_tensor # type: ignore[assignment]
dtype_cast_scale, dtype_cast_zero_point = node_a_input_qparams
elif (
node_input_type_a == NodeInputOrOutputType.FP16 and
node_input_type_c == NodeInputOrOutputType.FP32
):
dtype_cast_method = 'to'
dtype_cast_method_dtype = torch.float16
else:
raise AssertionError(
f"dtype cast from {node_input_type_c} {node_c.format_node()} to " +
f"{node_input_type_a} {node_a.format_node()} needs to be implemented")
if isinstance(prev_node_c, Node):
new_dtype_cast_name = \
get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
if dtype_cast_op:
if dtype_cast_scale is not None and dtype_cast_zero_point is not None:
return _insert_quantize_per_tensor_node(
prev_node_c, node_a, gm_b, graph_c, dtype_cast_scale,
dtype_cast_zero_point, new_dtype_cast_name)
else:
return graph_c.create_node(
'call_function', dtype_cast_op, (prev_node_c,), {},
new_dtype_cast_name)
elif dtype_cast_method:
return graph_c.create_node(
'call_method', dtype_cast_method,
(prev_node_c, dtype_cast_method_dtype), {}, new_dtype_cast_name)
else:
assert dtype_cast_mod_cls
dtype_cast_mod = dtype_cast_mod_cls()
setattr(gm_b, new_dtype_cast_name, dtype_cast_mod)
return graph_c.create_node(
'call_module', new_dtype_cast_name, (prev_node_c,), {},
new_dtype_cast_name)
elif isinstance(prev_node_c, list):
results = []
for prev_node_c_inner in prev_node_c:
new_dtype_cast_name = \
get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
if dtype_cast_op:
# TODO(future PR): add handling for quantize_per_tensor
new_dtype_cast_node = graph_c.create_node(
'call_function', dtype_cast_op, (prev_node_c_inner,), {},
new_dtype_cast_name)
results.append(new_dtype_cast_node)
else:
assert dtype_cast_mod_cls
dtype_cast_mod = dtype_cast_mod_cls()
setattr(gm_b, new_dtype_cast_name, dtype_cast_mod)
new_dtype_cast_node = graph_c.create_node(
'call_module', new_dtype_cast_name, (prev_node_c_inner,), {},
new_dtype_cast_name)
results.append(new_dtype_cast_node)
return results
else:
raise AssertionError(f"type f{type(prev_node_c)} is not handled")
# TODO(future PR): look into using copy_node API instead
def _copy_node_from_a_to_c(
node_a: Node,
gm_a: GraphModule,
gm_b: GraphModule,
graph_c: Graph,
) -> Node:
"""
Simple copy of node_a to graph_c.
"""
if node_a.op == 'get_attr':
node_a_copy_name = \
get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b)
node_a_obj = getattr_from_fqn(gm_a, node_a.target) # type: ignore[arg-type]
if torch.is_tensor(node_a_obj):
node_a_obj = node_a_obj.detach()
setattr(gm_b, node_a_copy_name, node_a_obj)
node_a_copy = graph_c.create_node(
node_a.op, node_a_copy_name, (), {}, node_a_copy_name)
return node_a_copy
elif node_a.op == 'call_method':
assert node_a.target in ('dequantize', 'to'), \
f"target {node_a.target} is not implemented"
if node_a.target == 'dequantize':
arg_copy = _copy_node_from_a_to_c(
get_normalized_nth_input(node_a, gm_a, 0),
gm_a, gm_b, graph_c) # type: ignore[arg-type]
node_a_copy_name = \
get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b)
node_a_copy = graph_c.create_node(
node_a.op, node_a.target, (arg_copy,), {}, node_a_copy_name)
return node_a_copy
else: # to
arg_copy = _copy_node_from_a_to_c(
get_normalized_nth_input(node_a, gm_a, 0), gm_a, gm_b, graph_c) # type: ignore[arg-type]
node_a_copy_name = \
get_new_attr_name_with_prefix(node_a.name + '_shadow_copy_')(gm_b)
node_a_copy = graph_c.create_node(
node_a.op, node_a.target,
(arg_copy, get_normalized_nth_input(node_a, gm_a, 1)),
{}, node_a_copy_name)
return node_a_copy
else:
raise AssertionError(
f"handling of node {node_a.format_node()} with op {node_a.op} is not implemented")
def _can_insert_copy_of_subgraph_a(
subgraph_a: NSSubgraph,
gm_a: GraphModule,
num_non_param_args_node_a: int,
) -> bool:
"""
This function returns `False` if the input subgraph cannot be copied by
`_insert_copy_of_subgraph_a_after_input_node_c`. This usually means
that there is a corner case logic for which copy is not yet implemented.
"""
# populate the list of nodes we need to check
nodes = []
cur_node = subgraph_a.end_node
while cur_node != subgraph_a.start_node:
nodes.append(cur_node)
cur_node = get_normalized_nth_input(cur_node, gm_a, 0) # type: ignore[assignment]
nodes.append(cur_node)
nodes.reverse()
def _can_insert(node_a_arg, gm_a):
if isinstance(node_a_arg, Node):
arg_a = return_first_non_observer_node(node_a_arg, gm_a)
if arg_a.op == 'call_method':
return arg_a.target in ('dequantize', 'to')
elif arg_a.op == 'get_attr':
return True
else:
return False
elif isinstance(node_a_arg, (list, tuple)):
for el in node_a_arg:
if not isinstance(el, Node):
return False
return True
# For each node, check if we handle the copy behavior. This follows the
# logic in `_insert_copy_of_subgraph_a_after_input_node_c`.
for node_a in nodes:
local_num_non_param_args_node_a = num_non_param_args_node_a \
if node_a is nodes[0] else 1
norm_args_kwargs = node_a.normalized_arguments(
gm_a, normalize_to_only_use_kwargs=True)
if norm_args_kwargs is not None:
norm_args, norm_kwargs = norm_args_kwargs
else:
norm_args, norm_kwargs = node_a.args, node_a.kwargs
cur_idx = 0
while cur_idx < len(norm_args):
if cur_idx == 0:
pass
elif cur_idx == 1 and local_num_non_param_args_node_a == 2:
pass
else:
if not _can_insert(norm_args[cur_idx], gm_a):
return False
cur_idx += 1
for kwarg_name, kwarg_val in norm_kwargs.items():
# stitch the inputs from base graph
if cur_idx == 0:
pass
elif cur_idx == 1 and local_num_non_param_args_node_a == 2:
pass
else:
if not _can_insert(kwarg_val, gm_a):
return False
cur_idx += 1
return True
def _insert_copy_of_subgraph_a_after_input_node_c(
input_node_c: Union[Node, List[Node]],
input_node_c_2: Optional[Union[Node, List[Node]]],
subgraph_a: NSSubgraph,
gm_a: GraphModule,
gm_b: GraphModule,
node_name_prefix: str,
) -> Node:
"""
TODO(before land): real docblock
"""
if isinstance(input_node_c, Node):
graph_c = input_node_c.graph
else:
assert isinstance(input_node_c, list)
graph_c = input_node_c[0].graph
# create a sequential list of the subgraphs' nodes from start to end,
# because we need to add the nodes to graph C in non-reverse order
nodes_of_a = [subgraph_a.end_node]
cur_node = subgraph_a.end_node
while cur_node != subgraph_a.start_node:
cur_node = get_normalized_nth_input(cur_node, gm_a, 0) # type: ignore[assignment]
nodes_of_a.insert(0, cur_node)
# go through nodes of a in order, and insert them into the graph of c
# sequentially
cur_node_a = nodes_of_a[0]
cur_node_c = _insert_copy_of_node_a_after_input_node_c(
input_node_c,
input_node_c_2,
cur_node_a,
gm_a,
gm_b,
node_name_prefix)
for cur_idx_a in range(1, len(nodes_of_a)):
cur_node_a = nodes_of_a[cur_idx_a]
prev_node_c = cur_node_c # previous added node is the input to next node
cur_node_c = _insert_copy_of_node_a_after_input_node_c(
prev_node_c,
# TODO(future PR): enable multiple inputs for nodes which are not at start of subgraph
None,
cur_node_a,
gm_a,
gm_b,
node_name_prefix)
# return the last inserted node
return cur_node_c
def _insert_copy_of_node_a_after_input_node_c(
input_node_c: Union[Node, List[Node]],
input_node_c_2: Optional[Union[Node, List[Node]]],
node_a: Node,
gm_a: GraphModule,
gm_b: GraphModule,
node_name_prefix: str,
) -> Node:
"""
Assume that node_a from graph_a has
args (input, (input2)?, arg1, ...), and
kwargs {kw0: kwarg0, ...}
    Note: input2 is optional. If it is None, we assume that the op
has a single non-param input. If it is specified, we assume that the op
has two non-param inputs.
Copies the underlying values of arg1..argn and kwarg0..kwargn into gm_b,
and creates the corresponding nodes in graph_c. Note: observers are ignored,
so if an arg is an observer we navigate up until we find a non-observer parent.
If node_a is a call_module, points the module pointed to by node_a to gm_b.
Creates the copy of node_a in graph_c, with input as the first arg,
and all other args and kwargs pointing to the copies of the objects
in gm_b created above.
An example in pictures:
graph A:
========
input -------------> node_a
/ / /
(input_2)?----------/ / /
/ /
weight -> weight_obs /
/
bias ----------------
graph C (derived from B):
=========================
input_node_c --> node_a_copy
/ / /
(input_node_c_2)? / /
/ /
weight_copy ----/ /
/
bias_copy ------/
"""
if isinstance(input_node_c, Node):
graph_c = input_node_c.graph
else:
assert isinstance(input_node_c, list)
graph_c = input_node_c[0].graph
norm_args_kwargs = node_a.normalized_arguments(
gm_a, normalize_to_only_use_kwargs=True)
if norm_args_kwargs is not None:
norm_args, norm_kwargs = norm_args_kwargs
else:
norm_args, norm_kwargs = node_a.args, node_a.kwargs
new_args = []
new_kwargs = {}
def _copy_arg(arg):
# copy the other inputs from the other graph
if isinstance(arg, Node):
arg = return_first_non_observer_node(arg, gm_a)
arg = _copy_node_from_a_to_c(arg, gm_a, gm_b, graph_c)
return arg
elif isinstance(arg, (int, float, torch.dtype)):
return arg
        elif isinstance(arg, (list, tuple)):
            for el in arg:
                assert not isinstance(el, Node), \
                    "handling of Node inside list is not implemented"
            return arg
        else:
            raise AssertionError(
                f"handling for arg of type {type(arg)} is not implemented")
cur_idx = 0
while cur_idx < len(norm_args):
if cur_idx == 0:
new_arg = input_node_c
elif cur_idx == 1 and input_node_c_2 is not None:
new_arg = input_node_c_2
else:
new_arg = _copy_arg(norm_args[cur_idx])
new_args.append(new_arg)
cur_idx += 1
for kwarg_name, kwarg_val in norm_kwargs.items():
# stitch the inputs from base graph
if cur_idx == 0:
new_kwargs[kwarg_name] = input_node_c
elif cur_idx == 1 and input_node_c_2 is not None:
new_kwargs[kwarg_name] = input_node_c_2
else:
new_kwargs[kwarg_name] = _copy_arg(kwarg_val)
cur_idx += 1
new_args = tuple(new_args) # type: ignore[assignment]
node_a_shadows_c_name = \
get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
if node_a.op == 'call_module':
# if target is a module, we point to the module from gm_b
new_mod_copy_name = \
get_new_attr_name_with_prefix(node_name_prefix)(gm_b)
# fetch the corresponding module from gm_a
assert isinstance(node_a.target, str)
mod_a = getattr_from_fqn(gm_a, node_a.target)
setattr(gm_b, new_mod_copy_name, mod_a)
node_a_shadows_c = graph_c.create_node(
node_a.op, new_mod_copy_name, new_args,
new_kwargs, node_a_shadows_c_name)
return node_a_shadows_c
else:
assert node_a.op in ('call_function', 'call_method')
node_a_shadows_c = graph_c.create_node(
node_a.op, node_a.target, new_args,
new_kwargs, node_a_shadows_c_name)
return node_a_shadows_c
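# Illustrative sketch (hypothetical helper _demo_stitch_args): the arg-stitching
# rule above can be summarized as "position 0 always comes from graph C,
# position 1 comes from graph C only when a second non-param input is given,
# and every remaining arg is copied over from graph A". The toy function below
# applies that rule to plain Python values so the convention is easy to see.
def _demo_stitch_args(norm_args_from_a, input_c, input_c_2=None):
    stitched = []
    for idx, arg_a in enumerate(norm_args_from_a):
        if idx == 0:
            stitched.append(input_c)
        elif idx == 1 and input_c_2 is not None:
            stitched.append(input_c_2)
        else:
            stitched.append(('copied_from_graph_a', arg_a))
    return tuple(stitched)
# _demo_stitch_args(['x_a', 'w_a', 'b_a'], 'x_c') returns
# ('x_c', ('copied_from_graph_a', 'w_a'), ('copied_from_graph_a', 'b_a'))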
def create_a_shadows_b(
name_a: str,
gm_a: GraphModule,
name_b: str,
gm_b: GraphModule,
matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]],
logger_cls: Callable,
should_log_inputs: bool,
node_type_to_io_type_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> GraphModule:
"""
Creates a new GraphModule consisting of the graph of C, with the meaningful
nodes of A shadowing the corresponding nodes of B. For example,
Graph A:
a0 -> op0_fp32 -> a1 -> op1_fp32 -> a2
Graph B:
b0 -> op0_int8 -> b1 -> op1_int8 -> b2
matched_node_pairs: {'op0': (op0_fp32, op0_int8), 'op1': (op1_fp32, op1_int8)}
Graph C (A shadows B):
/ dequant0 -> op0_fp32 -> logger_a_0 / dequant_1 -> op1_fp32 -> logger_a_1
/ /
b0 -------------> op0_int8 -> logger_b_0 --------------> op1_int8 -> logger_b_1
In a nutshell, this function does the following for each node pair:
* copies the necessary attributes and modules from gm_a to gm_b,
keeping names unique
* adds a dtype cast op (dequant, quant, etc)
* adds a copy of node_a in gm_b's graph
* adds loggers to the outputs of node_a and node_b
"""
if node_type_to_io_type_map is None:
node_type_to_io_type_map = get_node_type_to_io_type_map()
# graph_c is the graph created from copying the nodes of graph_b and inserting
# the shadows with the nodes copied from graph_a
graph_c = Graph()
env_c: Dict[str, Any] = {}
modules = dict(gm_b.named_modules())
def load_arg(a):
return map_arg(a, lambda node: env_c[node.name])
start_node_b_to_matched_subgraph_a_and_name = {}
end_node_b_to_matched_subgraph_a_and_name = {}
for match_name, match in matched_subgraph_pairs.items():
subgraph_a, subgraph_b = match
ref_node_type_a = get_target_type_str(subgraph_a.base_op_node, gm_a)
ref_node_type_b = get_target_type_str(subgraph_b.base_op_node, gm_b)
start_node_b_to_matched_subgraph_a_and_name[subgraph_b.start_node] = \
(subgraph_a, match_name, ref_node_type_a, ref_node_type_b)
end_node_b_to_matched_subgraph_a_and_name[subgraph_b.end_node] = \
(subgraph_a, match_name, ref_node_type_a, ref_node_type_b)
for node_b in gm_b.graph.nodes:
if node_b.op == 'output':
graph_c.output(map_arg(node_b.args[0], load_arg))
continue
# calculate the flags to determine what to do with this node
node_b_is_start_node = node_b in start_node_b_to_matched_subgraph_a_and_name
node_b_is_end_node = node_b in end_node_b_to_matched_subgraph_a_and_name
if (node_b_is_start_node or node_b_is_end_node):
if node_b_is_start_node:
subgraph_a, ref_name, ref_node_type_a, ref_node_type_b = \
start_node_b_to_matched_subgraph_a_and_name[node_b]
else:
assert node_b_is_end_node
subgraph_a, ref_name, ref_node_type_a, ref_node_type_b = \
end_node_b_to_matched_subgraph_a_and_name[node_b]
all_op_types_support_shadowing = (
op_type_supports_shadowing(subgraph_a.start_node) and
op_type_supports_shadowing(node_b)
)
if not all_op_types_support_shadowing:
print(
f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
', unsupported')
env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
continue
# For both start_node and end_node verify that we know how to do
# the dtype cast. If we do not, skip.
node_input_type_a, node_output_type_a = \
get_node_first_input_and_output_type(
subgraph_a.start_node, gm_a, logger_cls,
node_type_to_io_type_map)
node_input_type_b, node_output_type_b = \
get_node_first_input_and_output_type(
node_b, gm_b, logger_cls,
node_type_to_io_type_map)
node_io_types_known_a_and_b = (
node_input_type_a != NodeInputOrOutputType.UNKNOWN and
node_output_type_a != NodeInputOrOutputType.UNKNOWN and
node_input_type_b != NodeInputOrOutputType.UNKNOWN and
node_output_type_b != NodeInputOrOutputType.UNKNOWN
)
if not node_io_types_known_a_and_b:
print(
f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
', unknown dtype cast')
env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
continue
# If we are shadowing from fp32 to int8, we need to insert
# quantize_per_tensor call with qparams from the previous node.
# Only do this if we are able to infer these qparams from the graph.
if (
node_input_type_a == NodeInputOrOutputType.INT8 and
node_input_type_b == NodeInputOrOutputType.FP32
):
node_a_input_qparams = get_node_input_qparams(
subgraph_a.start_node, gm_a, node_type_to_io_type_map)
if not node_a_input_qparams:
print(
f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
', unknown input qparams')
env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
continue
num_non_param_args_node_a = \
get_number_of_non_param_args(subgraph_a.start_node, gm_a)
if not _can_insert_copy_of_subgraph_a(subgraph_a, gm_a, num_non_param_args_node_a):
print(
f'skipping shadow loggers for node_b: {get_target_type_str(node_b, gm_b)}' +
f', start_node_a: {get_target_type_str(subgraph_a.start_node, gm_a)}' +
', unhandled logic in subgraph copy')
env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
continue
fqn_base_a = _maybe_get_fqn(subgraph_a.base_op_node, gm_a)
fqn_base_b = _maybe_get_fqn(subgraph_b.base_op_node, gm_b)
if node_b_is_start_node:
# if necessary, log the input of node_c
if should_log_inputs:
prev_node_b = get_normalized_nth_input(node_b, gm_b, 0)
if isinstance(prev_node_b, Node):
prev_node_c = env_c[prev_node_b.name]
env_c[prev_node_c.name] = _insert_logger_after_node(
prev_node_c, gm_b, logger_cls, '_ns_logger_b_inp_',
node_b.name, name_b, ref_name, ref_node_type_b,
NSSingleResultValuesType.NODE_INPUT.value,
index_within_arg=0, index_of_arg=0,
fqn=fqn_base_b)
elif isinstance(prev_node_b, list):
# first, save the prev_node instances, because they
# will be overwritten in the env after the first logger
# is added
prev_node_c_list = [env_c[arg.name] for arg in prev_node_b]
for arg_idx, arg in enumerate(prev_node_b):
prev_node_c = prev_node_c_list[arg_idx]
env_c[prev_node_c.name] = _insert_logger_after_node(
prev_node_c, gm_b, logger_cls, '_ns_logger_b_inp_',
node_b.name, name_b, ref_name, ref_node_type_b,
NSSingleResultValuesType.NODE_INPUT.value,
index_within_arg=arg_idx, index_of_arg=0,
fqn=fqn_base_b)
else:
# logging of inputs which are not lists is not supported yet
raise AssertionError(f"type {type(prev_node_b)} is not handled yet")
# subgraph so far:
#
# (prev_node_c)+ -> (logger_c_input)?
# Note: this if statement is always True, spelling it out to clarify code
# intent.
if node_b_is_start_node or node_b_is_end_node:
# ensure env_c is populated with base node
env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
node_c = env_c[node_b.name]
# after this point,
#
# node_a is the original node from graph_a, with parent module gm_a
# node_b is the original node from graph_b, with parent module gm_b
# node_c is the copy of node_b in graph_c
#
# subgraph so far:
#
# (prev_node_c)+ -> (logger_c_input)? -> node_start_c
if node_b_is_start_node:
# cast dtype from the dtype of node_c's input to the dtype of
# node_a's input (dequant, etc)
# prev_node_c = node_c.args[0]
prev_node_c = get_normalized_nth_input(node_c, gm_b, 0)
if should_log_inputs:
# skip the input logger when inserting a dtype cast
if isinstance(prev_node_c, Node):
prev_node_c = get_normalized_nth_input(node_c, gm_b, 0)
elif isinstance(prev_node_c, list):
prev_node_c = [get_normalized_nth_input(arg, gm_b, 0) for arg in prev_node_c]
dtype_cast_node = _insert_dtype_cast_after_node(
subgraph_a.start_node, node_c, prev_node_c, gm_a, gm_b, graph_c,
node_b.name + '_dtype_cast_', logger_cls,
node_type_to_io_type_map)
# note: not inserting to env_c because all nodes which use the dtype
# casts are copied from graph_a
#
# subgraph so far:
#
# (dtype_cast_node)+
# /
# (prev_node_c)+ -> (logger_c_input)? -> node_start_c
# if input logging is enabled, log the input to the subgraph
if should_log_inputs:
# TODO: explain this
ref_node_name = ''
if isinstance(dtype_cast_node, Node):
dtype_cast_node = _insert_logger_after_node(
dtype_cast_node, gm_b, logger_cls, '_ns_logger_a_inp_',
ref_node_name, name_a, ref_name, ref_node_type_a,
NSSingleResultValuesType.NODE_INPUT.value,
index_within_arg=0, index_of_arg=0,
fqn=fqn_base_a)
input_logger: Union[Node, List[Node]] = dtype_cast_node
else:
assert isinstance(dtype_cast_node, list)
new_loggers = []
for dtype_cast_idx, dtype_cast_node_inner in enumerate(dtype_cast_node):
dtype_cast_logger = _insert_logger_after_node(
dtype_cast_node_inner, gm_b, logger_cls, '_ns_logger_a_inp_',
ref_node_name, name_a, ref_name, ref_node_type_a,
NSSingleResultValuesType.NODE_INPUT.value,
index_within_arg=dtype_cast_idx,
index_of_arg=0,
fqn=fqn_base_a)
new_loggers.append(dtype_cast_logger)
dtype_cast_node = new_loggers
input_logger = dtype_cast_node
# subgraph so far:
#
# (dtype_cast_node)+ -> (logger_a_input)?
# /
# prev_node_c -> (logger_c_input)? -> node_start_c
# hook up the new mod_a copy to be in the graph, receiving the
# same inputs as mod_b does, with dtype cast to match a
# Some ops, such as LSTMs, have two non-param inputs. If we have
# such an op, pass the second param as well. Note: dtype casting
# for the second param is not implemented yet, it can be added
# later if there is a use case.
node_c_second_non_param_arg = None
num_non_param_args_node_a = get_number_of_non_param_args(subgraph_a.start_node, gm_a)
if num_non_param_args_node_a == 2:
# node_c_second_non_param_arg = node_c.args[1]
node_c_second_non_param_arg = get_normalized_nth_input(node_c, gm_b, 1)
node_a_shadows_c = _insert_copy_of_subgraph_a_after_input_node_c(
dtype_cast_node, node_c_second_non_param_arg,
subgraph_a, gm_a, gm_b, node_c.name + '_shadow_copy_')
env_c[node_a_shadows_c.name] = node_a_shadows_c
# subgraph so far:
#
# dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy(args/kwargs not shown)
# /
# (prev_node_c)+ -> (logger_c_input)? -> node_start_c
if should_log_inputs:
# When we created the input logger, we left the ref_node_name
# as an empty string, because the subgraph copy did not exist
# yet. Now that the subgraph copy exists, we modify this name
# to its true value.
# Note: the alternative to this is to create the input logger
# after creating the subgraph, which is slightly more
# complicated. This is the lesser of two evils.
# input_logger = env_c[dtype_cast_node.name]
# Find the first node in the subgraph
cur_node = node_a_shadows_c
while get_normalized_nth_input(cur_node, gm_b, 0) != input_logger:
cur_node = get_normalized_nth_input(cur_node, gm_b, 0) # type: ignore[assignment]
if isinstance(input_logger, Node):
input_logger_mod = getattr(gm_b, input_logger.name)
input_logger_mod.ref_node_name = cur_node.name
else:
assert isinstance(input_logger, list)
for input_logger_inner in input_logger:
input_logger_mod = getattr(gm_b, input_logger_inner.name)
input_logger_mod.ref_node_name = cur_node.name
# hook up a logger to the mod_a copy
env_c[node_a_shadows_c.name] = _insert_logger_after_node(
env_c[node_a_shadows_c.name], gm_b, logger_cls, '_ns_logger_a_',
node_a_shadows_c.name, name_a, ref_name, ref_node_type_a,
NSSingleResultValuesType.NODE_OUTPUT.value,
index_within_arg=0, index_of_arg=0,
fqn=fqn_base_a)
# subgraph so far:
#
# dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a
# /
# (prev_node_c)+ -> (logger_c_input)? -> node_start_c
if node_b_is_end_node:
# hook up a logger to the mod_b copy
env_c[node_b.name] = _insert_logger_after_node(
env_c[node_b.name], gm_b, logger_cls, '_ns_logger_b_',
node_b.name, name_b, ref_name, ref_node_type_b,
NSSingleResultValuesType.NODE_OUTPUT.value,
index_within_arg=0, index_of_arg=0,
fqn=fqn_base_b)
# subgraph so far:
#
# dtype_cast_node -> (logger_a_input)? -> subgraph_a_copy -> logger_a
# /
# (prev_node_c+) -> (logger_c_input)? -> node_start_c -> ... -> node_end_c -> logger_c
#
# Note: node_start_c may be the same node as node_end_c, or they
# may have nodes inbetween.
else:
env_c[node_b.name] = graph_c.node_copy(node_b, load_arg)
gm_c = GraphModule(gm_b, graph_c)
return gm_c
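# Illustrative pipeline sketch (hypothetical wiring, not a supported public
# entry point): `create_a_shadows_b` is normally driven by the higher-level
# Numeric Suite APIs, which first match subgraphs between the two models and
# then build the shadow model. The function below assumes `gm_fp32` and
# `gm_int8` are two already-traced, related GraphModules and that `logger_cls`
# has the constructor signature expected by `_insert_logger_after_node`.
def _demo_build_shadow_model(gm_fp32, gm_int8, logger_cls):
    from torch.ao.ns.fx.graph_matcher import get_matching_subgraph_pairs
    matched_pairs = get_matching_subgraph_pairs(gm_fp32, gm_int8)
    shadow_model = create_a_shadows_b(
        name_a='fp32', gm_a=gm_fp32,
        name_b='int8', gm_b=gm_int8,
        matched_subgraph_pairs=matched_pairs,
        logger_cls=logger_cls,
        should_log_inputs=False,
    )
    return shadow_model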
|
pytorch-master
|
torch/ao/ns/fx/graph_passes.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.quantized.dynamic as nnqd
import torch.nn.quantized as nnq
import torch.nn.intrinsic.qat as nniqat
import torch.nn.qat as nnqat
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
toq = torch.ops.quantized
from torch.fx import GraphModule
from torch.fx.graph import Node
from .utils import (
get_target_type_str,
getattr_from_fqn,
return_first_non_observer_node,
)
from .ns_types import (
NSSingleResultValuesType,
NSSingleResultType,
)
from typing import List, Optional, Dict, Callable
def mod_weight_detach(mod: nn.Module) -> torch.Tensor:
return mod.weight.detach() # type: ignore[operator]
def mod_0_weight_detach(mod: nn.Module) -> torch.Tensor:
return mod[0].weight.detach() # type: ignore[index]
def mod_weight_bias_0(mod: nn.Module) -> torch.Tensor:
return mod._weight_bias()[0] # type: ignore[operator]
def get_lstm_weight(mod: nn.Module) -> List[torch.Tensor]:
res = []
for idx, param_name in enumerate(mod._flat_weights_names): # type: ignore[arg-type]
if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
param_value = mod._flat_weights[idx].detach() # type: ignore[index]
res.append(param_value)
return res
def get_qlstm_weight(mod: nn.Module) -> List[torch.Tensor]:
res = []
for weight_value in mod._all_weight_values: # type: ignore[union-attr]
res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0])
res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0])
return res
def get_conv_mod_weight(mod: nn.Module) -> torch.Tensor:
if (
isinstance(mod, nn.Conv1d) or
isinstance(mod, nn.Conv2d) or
isinstance(mod, nn.Conv3d)
):
return mod.weight.detach()
elif (
isinstance(mod, nni.ConvReLU1d) or
isinstance(mod, nni.ConvReLU2d) or
isinstance(mod, nni.ConvReLU3d)
):
return mod[0].weight.detach()
else:
return mod._weight_bias()[0] # type: ignore[operator]
def get_linear_mod_weight(mod: nn.Module) -> torch.Tensor:
if isinstance(mod, nn.Linear):
return mod.weight.detach()
elif isinstance(mod, nni.LinearReLU):
return mod[0].weight.detach()
else:
return mod._weight_bias()[0] # type: ignore[operator]
def get_lstm_mod_weights(mod: nn.Module) -> List[torch.Tensor]:
# TODO(future PR): make more generic, handle everything
if isinstance(mod, nn.LSTM):
res = []
for idx, param_name in enumerate(mod._flat_weights_names):
if 'weight_ih_l' in param_name or 'weight_hh_l' in param_name:
param_value = mod._flat_weights[idx].detach()
res.append(param_value)
return res
else:
assert isinstance(mod, nnqd.LSTM), f"type {type(res)} not handled yet"
res = []
for weight_value in mod._all_weight_values:
res.append(weight_value.param.__getstate__()[0][4][0].__getstate__()[0][0])
res.append(weight_value.param.__getstate__()[0][4][1].__getstate__()[0][0])
return res
def get_conv_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
# traverse backwards from the weight arg, accounting for any observers
weight_arg_node = node.args[1]
assert isinstance(weight_arg_node, Node)
weight_node = return_first_non_observer_node(weight_arg_node, gm)
assert isinstance(weight_node, Node)
assert weight_node.op == 'get_attr'
weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type]
return weight.detach()
def get_qconv_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
# qconv state is arg 1
qconv_state_node = node.args[1]
assert isinstance(qconv_state_node, Node)
assert qconv_state_node.op == 'get_attr'
qconv_state_obj = getattr_from_fqn(gm, qconv_state_node.target) # type: ignore[arg-type]
return qconv_state_obj.weight()
def get_linear_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
# traverse backwards from the weight arg, accounting for any observers
# supported patterns:
# weight -> obs -> linear
# weight -> to(torch.float16) -> dequantize -> linear
linear_second_arg = node.args[1]
assert isinstance(linear_second_arg, Node)
if linear_second_arg.op == 'call_module':
# weight -> obs -> linear
weight_arg_node = node.args[1]
assert isinstance(weight_arg_node, Node)
weight_node = weight_arg_node.args[0]
assert isinstance(weight_node, Node)
assert weight_node.op == 'get_attr'
weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type]
return weight.detach()
elif linear_second_arg.op == 'call_method':
# weight -> to(torch.float16) -> dequantize -> linear
assert linear_second_arg.op == 'call_method'
dequant_node = node.args[1]
assert isinstance(dequant_node, Node)
to_fp16_node = dequant_node.args[0]
assert isinstance(to_fp16_node, Node)
# extract the dtype, so we can cast to it before returning
target_dtype = to_fp16_node.args[1]
weight_node = to_fp16_node.args[0]
assert isinstance(weight_node, Node)
assert weight_node.op == 'get_attr'
weight = getattr_from_fqn(gm, weight_node.target) # type: ignore[arg-type]
# return the weight with fp16 cast
return weight.detach().to(target_dtype)
else:
assert linear_second_arg.op == 'get_attr'
weight = getattr_from_fqn(gm, linear_second_arg.target) # type: ignore[arg-type]
return weight.detach()
def get_qlinear_fun_weight(node: Node, gm: GraphModule) -> torch.Tensor:
# packed weight is arg 1
packed_weight_node = node.args[1]
assert isinstance(packed_weight_node, Node)
assert packed_weight_node.op == 'get_attr'
packed_weight = getattr_from_fqn(gm, packed_weight_node.target) # type: ignore[arg-type]
# TODO(future PR): why does packed_weight.unpack() not work?
(weight, _bias), _name = packed_weight.__getstate__()
return weight
def get_op_to_type_to_weight_extraction_fn() -> Dict[str, Dict[Callable, Callable]]:
op_to_type_to_weight_extraction_fn: Dict[str, Dict[Callable, Callable]] = {
'call_module': {
# Conv1d
nn.Conv1d: mod_weight_detach,
nni.ConvReLU1d: mod_0_weight_detach,
nnq.Conv1d: mod_weight_bias_0,
nnqat.Conv1d: mod_weight_detach,
nniqat.ConvBn1d: mod_weight_detach,
nniqat.ConvBnReLU1d: mod_weight_detach,
nniqat.ConvReLU1d: mod_weight_detach,
nniq.ConvReLU1d: mod_weight_bias_0,
# Conv2d
nn.Conv2d: mod_weight_detach,
nni.ConvReLU2d: mod_0_weight_detach,
nnq.Conv2d: mod_weight_bias_0,
nnqat.Conv2d: mod_weight_detach,
nniqat.ConvBn2d: mod_weight_detach,
nniqat.ConvBnReLU2d: mod_weight_detach,
nniqat.ConvReLU2d: mod_weight_detach,
nniq.ConvReLU2d: mod_weight_bias_0,
# Conv3d
nn.Conv3d: mod_weight_detach,
nni.ConvReLU3d: mod_0_weight_detach,
nnq.Conv3d: mod_weight_bias_0,
nnqat.Conv3d: mod_weight_detach,
nniqat.ConvBn3d: mod_weight_detach,
nniqat.ConvBnReLU3d: mod_weight_detach,
nniqat.ConvReLU3d: mod_weight_detach,
nniq.ConvReLU3d: mod_weight_bias_0,
# Linear
nn.Linear: mod_weight_detach,
nnq.Linear: mod_weight_bias_0,
nni.LinearReLU: mod_0_weight_detach,
nniq.LinearReLU: mod_weight_bias_0,
nnqat.Linear: mod_weight_detach,
nnqd.Linear: mod_weight_bias_0,
nniqat.LinearReLU: mod_weight_detach,
nniqat.LinearBn1d: mod_weight_detach,
nn.modules.linear.NonDynamicallyQuantizableLinear: mod_weight_detach,
# LSTM
nn.LSTM: get_lstm_weight,
nnqd.LSTM: get_qlstm_weight,
},
'call_function': {
# Conv
F.conv1d: get_conv_fun_weight,
F.conv2d: get_conv_fun_weight,
F.conv3d: get_conv_fun_weight,
toq.conv1d: get_qconv_fun_weight,
toq.conv2d: get_qconv_fun_weight,
toq.conv3d: get_qconv_fun_weight,
toq.conv1d_relu: get_qconv_fun_weight,
toq.conv2d_relu: get_qconv_fun_weight,
toq.conv3d_relu: get_qconv_fun_weight,
# Linear
F.linear: get_linear_fun_weight,
toq.linear: get_qlinear_fun_weight,
toq.linear_relu: get_qlinear_fun_weight,
},
}
return op_to_type_to_weight_extraction_fn
def extract_weight_from_node(
node: Node,
gm: GraphModule,
op_to_type_to_weight_extraction_fn: Optional[Dict[str, Dict[Callable, Callable]]] = None,
) -> Optional[NSSingleResultType]:
res_type = NSSingleResultValuesType.WEIGHT.value
# Not all graphmodules have _node_name_to_scope, so only fill it
# out if it exists.
fqn = None
if hasattr(gm, '_node_name_to_scope'):
fqn = gm._node_name_to_scope[node.name][0] # type: ignore[index]
if op_to_type_to_weight_extraction_fn is None:
op_to_type_to_weight_extraction_fn = get_op_to_type_to_weight_extraction_fn()
ref_node_type = get_target_type_str(node, gm)
# for extracting weights, these are always the same
prev_node_type = ref_node_type
if node.op == 'call_function':
function_mapping = op_to_type_to_weight_extraction_fn['call_function']
for target_fn_type, weight_extraction_fn in function_mapping.items():
if node.target == target_fn_type:
weight = weight_extraction_fn(node, gm)
return {
'type': res_type,
'values': [weight],
'prev_node_name': node.name,
'prev_node_target_type': prev_node_type,
'ref_node_name': node.name,
'ref_node_target_type': ref_node_type,
'index_within_arg': 0,
'index_of_arg': 0,
'fqn': fqn,
}
elif node.op == 'call_module':
# for call_module, we need to look up the modules to do the type check
assert isinstance(node.target, str)
mod = getattr_from_fqn(gm, node.target)
module_mapping = op_to_type_to_weight_extraction_fn['call_module']
for target_mod_type, weight_extraction_fn in module_mapping.items():
if type(mod) == target_mod_type:
weight = weight_extraction_fn(mod)
return {
'type': res_type,
'values': [weight],
'prev_node_name': node.name,
'prev_node_target_type': prev_node_type,
'ref_node_name': node.name,
'ref_node_target_type': ref_node_type,
'index_within_arg': 0,
'index_of_arg': 0,
'fqn': fqn,
}
return None
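# Illustrative usage sketch (hypothetical names _DemoModel and
# _demo_extract_linear_weight): extract the weight of a call_module Linear
# node from a symbolically traced model. The returned dict follows the
# NSSingleResultType layout used throughout this file.
def _demo_extract_linear_weight():
    from torch.fx import symbolic_trace
    class _DemoModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(4, 4)
        def forward(self, x):
            return self.fc(x)
    gm = symbolic_trace(_DemoModel())
    for node in gm.graph.nodes:
        if node.op == 'call_module' and node.target == 'fc':
            result = extract_weight_from_node(node, gm)
            # result['values'][0] is the detached weight tensor
            return result
    return None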
|
pytorch-master
|
torch/ao/ns/fx/weight_utils.py
|
import collections
import enum
import torch
toq = torch.ops.quantized
from torch.fx import GraphModule
from torch.fx.graph import Graph, Node
from torch.ao.quantization.utils import getattr_from_fqn
from .ns_types import NSSubgraph, NSNodeTargetType
from .mappings import (
get_base_name_to_sets_of_related_ops,
get_unmatchable_types_map,
)
from .pattern_utils import (
get_type_a_related_to_b,
get_reversed_fusions,
end_node_matches_reversed_fusion,
)
from torch.ao.quantization import (
ObserverBase,
FakeQuantizeBase,
)
from typing import Dict, Tuple, List, Optional, Set, Any
def _get_output_nodes(g: Graph) -> List[Node]:
return [n for n in g.nodes if n.op == 'output']
class _NSGraphMatchableSubgraphsIterator:
"""
Iterates through the graph of gm, starting with the output nodes
and continuing backwards.
1. Returns matchable subgraphs, in order. A subgraph is defined by
(start_node, end_node).
2. Skips over non-matchable subgraphs
"""
def __init__(
self,
gm: GraphModule,
non_matchable_functions: Set[NSNodeTargetType],
non_matchable_modules: Set[NSNodeTargetType],
non_matchable_methods: Set[NSNodeTargetType],
):
self.gm: GraphModule = gm
self.non_matchable_functions: Set[NSNodeTargetType] = non_matchable_functions
self.non_matchable_modules: Set[NSNodeTargetType] = non_matchable_modules
self.non_matchable_methods: Set[NSNodeTargetType] = non_matchable_methods
self.seen_nodes: Set[Node] = set()
self.stack: List[Node] = []
for start_node in _get_output_nodes(self.gm.graph):
self.stack.append(start_node)
def __iter__(self):
return self
def __next__(self) -> NSSubgraph:
"""
Returns the next matchable subgraph.
"""
while len(self.stack) > 0:
cur_end_node = self.stack.pop()
if cur_end_node in self.seen_nodes:
continue
# for subgraphs which are single nodes, start_node == end_node
# for subgraphs with more than one node, start node != end_node
cur_start_node = cur_end_node
# Subgraphs like linear-relu have the base node as the start node.
# Subgraphs like dequantize-linear-relu-to(torch.float16) have the
# base node as the second node.
# The cur_base_op_node var will move to the actual node during
# the fusion matching later in this code block.
cur_base_op_node = cur_end_node
# Check for potential fusions. For now, we are greedy
# and always skip all non-base nodes of a fusion. For example,
# if we match linear-relu backwards, we will always skip the
# relu node and attempt to match the linear node. This can
# be made configurable later if needed.
for _reverse_fusion_ops, base_op_idx in get_reversed_fusions():
is_match = end_node_matches_reversed_fusion(
cur_end_node, _reverse_fusion_ops, self.gm, self.seen_nodes)
if is_match:
# navigate to the base node
for rev_fusion_idx in range(len(_reverse_fusion_ops) - 1):
self.seen_nodes.add(cur_start_node)
# for now, assume that there are no other nodes
# which need to be added to the stack
cur_start_node = cur_start_node.args[0] # type: ignore[assignment]
# if the base op index matches the current node, set it
rev_base_op_idx = \
len(_reverse_fusion_ops) - 2 - base_op_idx
if rev_fusion_idx == rev_base_op_idx:
cur_base_op_node = cur_start_node
break
self.seen_nodes.add(cur_start_node)
# add args of previous nodes to stack
for arg in cur_start_node.all_input_nodes:
self._recursively_add_node_arg_to_stack(arg)
# skip unmatchable nodes
# note: this check is done on the start_node, i.e.
# if we are matching linear-relu in reverse, this would do the matchable
# check on the linear
if not self._is_matchable(cur_base_op_node):
continue
# If an observer or a fake_quant was not matched as a part of
# a pattern of multiple nodes, ignore it. One case where this is
# relevant is an observer on a graph input, which was added because
# it is necessary for the next node.
if cur_end_node.op == 'call_module' and cur_start_node is cur_end_node:
maybe_obs = getattr_from_fqn(self.gm, cur_end_node.target) # type: ignore[arg-type]
if isinstance(maybe_obs, (ObserverBase, FakeQuantizeBase)):
continue
return NSSubgraph(
start_node=cur_start_node, end_node=cur_end_node,
base_op_node=cur_base_op_node)
raise StopIteration
def _recursively_add_node_arg_to_stack(self, arg: Any) -> None:
"""
Adds all of the nodes in this arg to the stack, properly navigating
through list, dicts and tuples.
"""
if isinstance(arg, Node):
self.stack.append(arg)
elif isinstance(arg, torch.fx.immutable_collections.immutable_list) or type(arg) is tuple:
for inner_arg in arg:
self._recursively_add_node_arg_to_stack(inner_arg)
elif isinstance(arg, torch.fx.immutable_collections.immutable_dict):
for key, value in arg.items():
self._recursively_add_node_arg_to_stack(value)
def _is_matchable(self, node: Node) -> bool:
if node.op == 'call_function':
return not (node.target in self.non_matchable_functions)
elif node.op == 'call_module':
assert isinstance(node.target, str)
target_mod = getattr_from_fqn(self.gm, node.target)
return not \
any(isinstance(target_mod, t) # type: ignore[arg-type]
for t in self.non_matchable_modules)
elif node.op == 'call_method':
return not (node.target in self.non_matchable_methods)
else:
return False
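# Illustrative sketch (hypothetical names _DemoModel and
# _demo_iterate_subgraphs): walk the matchable subgraphs of a traced model,
# passing empty "unmatchable" sets so every op is considered. This is the same
# iteration that `get_matching_subgraph_pairs` below performs on both models.
# Depending on the registered fusions, linear followed by relu may come back
# as a single fused subgraph or as two separate subgraphs.
def _demo_iterate_subgraphs():
    import torch.nn as nn
    from torch.fx import symbolic_trace
    class _DemoModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(4, 4)
        def forward(self, x):
            return torch.relu(self.fc(x))
    gm = symbolic_trace(_DemoModel())
    it = _NSGraphMatchableSubgraphsIterator(gm, set(), set(), set())
    return [(sg.start_node.name, sg.end_node.name) for sg in it]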
class GraphMatchingException(Exception):
"""
Exception raised when two graphs cannot be matched.
"""
pass
class SubgraphTypeRelationship(enum.Enum):
# same type, known
# example: F.linear and F.linear, or nn.Conv2d and nn.Conv2d
EQUAL = enum.auto()
# same type, but the type is not known to Numerical Suite
# (user defined type, etc).
    EQUAL_BUT_UNKNOWN = enum.auto()
# known, same subgraph_relationship set, but not the same type
# example: F.linear and toq.linear
RELATED_BUT_NOT_EQUAL = enum.auto()
# not related
NOT_RELATED = enum.auto()
def _get_subgraph_relationship_type(
subgraph_a: NSSubgraph,
subgraph_b: NSSubgraph,
gm_a: GraphModule,
gm_b: GraphModule,
type_a_related_to_b: Set[Tuple[NSNodeTargetType, NSNodeTargetType]],
) -> SubgraphTypeRelationship:
node_a = subgraph_a.base_op_node
node_b = subgraph_b.base_op_node
# TODO(next): make this code handle matching by what is before the base op
if node_a.op != node_b.op:
if not (
node_a.op in ('call_function', 'call_method') and
node_b.op in ('call_function', 'call_method')
):
return SubgraphTypeRelationship.NOT_RELATED
if node_a.op in ('call_function', 'call_method'):
key = (node_a.target, node_b.target)
if key not in type_a_related_to_b:
if node_a.target == node_b.target:
                return SubgraphTypeRelationship.EQUAL_BUT_UNKNOWN
else:
return SubgraphTypeRelationship.NOT_RELATED
# after this point, we are dealing with known types
if node_a.target == node_b.target:
node_a_has_prev = subgraph_a.base_op_node == subgraph_a.start_node
node_b_has_prev = subgraph_b.base_op_node == subgraph_b.start_node
if node_a_has_prev and (not node_b_has_prev):
return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
elif (not node_a_has_prev) and node_b_has_prev:
return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
elif (not node_a_has_prev) and (not node_b_has_prev):
return SubgraphTypeRelationship.EQUAL
else:
# TODO(future PR): check for matches start_op_node and base_op_node
return SubgraphTypeRelationship.EQUAL
if key in type_a_related_to_b:
return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
else:
return SubgraphTypeRelationship.NOT_RELATED
elif node_a.op == 'call_module':
assert (subgraph_a.base_op_node == subgraph_a.start_node and
subgraph_b.base_op_node == subgraph_b.start_node), \
"Matching call_module patterns where base_op_node != start_node is not supported yet"
# for call_module, we need to look up the modules to do the type check
assert isinstance(node_a.target, str)
mod_a = getattr_from_fqn(gm_a, node_a.target)
assert isinstance(node_b.target, str)
mod_b = getattr_from_fqn(gm_b, node_b.target)
key = (type(mod_a), type(mod_b))
if key not in type_a_related_to_b:
if type(mod_a) == type(mod_b):
                return SubgraphTypeRelationship.EQUAL_BUT_UNKNOWN
else:
return SubgraphTypeRelationship.NOT_RELATED
elif type(mod_a) == type(mod_b):
return SubgraphTypeRelationship.EQUAL
else:
return SubgraphTypeRelationship.RELATED_BUT_NOT_EQUAL
return SubgraphTypeRelationship.NOT_RELATED
def _get_name_for_subgraph(
subgraph_a: NSSubgraph,
gm_a: GraphModule,
base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
existing_names: Set[str],
) -> str:
"""
Returns a unique name for a subgraph. This name is based on two things:
1. the name of the set containing the underlying type of the base op in the
subgraph (i.e. 'torch.nn.functional.linear' if this is related to a linear op)
2. the number of previous subgraphs with related underlying type of the base op
For example, in the graph
linear0 -> relu0 -> linear1 -> relu1
The subgraphs are (linear0, relu0) and (linear1, relu1). If we iterate
from the output node backwards, the name given to (linear1, relu1) will be
`base_op_torch.nn.functional.linear_0`, and the name given to (linear0, relu0)
will be `base_op_torch.nn.functional.linear_1`.
Why are we not just using the node name? Answer: because of two requirements:
A. fusions must be supported
B. some Numeric Suite APIs can be called without having all of the models in memory
For example, let's say we need to match nodes of
(1) ... -> linear0 -> relu0 -> ...
And
(2) ... -> linear_relu0 -> ...
Without being able to inspect them together. With the current naming scheme, if
we iterate through both of these graphs in the same order, and assuming the rest
of the graphs match, both of these subgraphs will get the same name without
(1) and (2) knowing anything about each other.
"""
target_type = _get_node_target_type(subgraph_a.base_op_node, gm_a)
target_base_type = None
for base_name, sets_of_related_ops in base_name_to_sets_of_related_ops.items():
if target_type in sets_of_related_ops:
target_base_type = base_name
target_base_name = 'base_op_' + str(target_base_type)
counter = 0
proposed_name = target_base_name + '_' + str(counter)
while proposed_name in existing_names:
counter += 1
proposed_name = target_base_name + '_' + str(counter)
existing_names.add(proposed_name)
return proposed_name
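# Illustrative sketch (hypothetical names _DemoModel and _demo_subgraph_names):
# assign names to the two Linear subgraphs of a toy model, iterating from the
# output backwards the way the matcher does. The exact base name depends on
# the related-ops mapping; with the default mapping both Linear layers share
# the same base name and only the trailing counter differs.
def _demo_subgraph_names():
    import torch.nn as nn
    from torch.fx import symbolic_trace
    class _DemoModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc1 = nn.Linear(4, 4)
            self.fc2 = nn.Linear(4, 4)
        def forward(self, x):
            return self.fc2(self.fc1(x))
    gm = symbolic_trace(_DemoModel())
    related_ops = get_base_name_to_sets_of_related_ops()
    existing_names: Set[str] = set()
    names = []
    for node in reversed(list(gm.graph.nodes)):
        if node.op == 'call_module':
            subgraph = NSSubgraph(start_node=node, end_node=node, base_op_node=node)
            names.append(_get_name_for_subgraph(
                subgraph, gm, related_ops, existing_names))
    # e.g. ['base_op_torch.nn.functional.linear_0',
    #       'base_op_torch.nn.functional.linear_1']
    return names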
def _get_node_target_type(node: Node, gm: GraphModule) -> Optional[NSNodeTargetType]:
if node.op in ('call_function', 'call_method'):
return node.target
elif node.op == 'call_module':
assert isinstance(node.target, str)
mod = getattr_from_fqn(gm, node.target)
return type(mod)
return None
def get_matching_subgraph_pairs(
gm_a: GraphModule,
gm_b: GraphModule,
base_name_to_sets_of_related_ops: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> Dict[str, Tuple[NSSubgraph, NSSubgraph]]:
"""
Matches matchable subgraphs of graph_a to graph_b.
For a node, "matchable" is defined as a node which is not an observer,
    fake_quant, quant or dequant.
A subgraph can contain one or more nodes. A subgraph is matchable if
at least one node inside of it is matchable. Currently, all nodes in
a subgraph must be matchable (because we assume no observers will be
inserted in the middle of a fusion).
A subgraph is defined by (start_node, end_node). We assume that only
start_node and end_node are linked with the surrounding graph, all other
nodes in a subgraph are self-contained.
A pair of nodes is "related" if both nodes represent the same mathematical
operation across different quantization flavors. For example,
`F.linear` and `torch.ops.quantized.linear` are related, and
`F.linear` and `torch.nn.Conv` are not related.
For each matchable pair of nodes node_a and node_b, they will match
if node_a and node_b are related.
For graphs A and B, they will match iff:
1. the number of matchable subgraphs in A and B is equivalent
2. when iterating through the matchable subgraphs of A and B in the same order, each
corresponding pair of base nodes is related.
This enables us to find the corresponding subgraphs between
graphs of related models. For example, if we had two graphs such as:
graph_a: x0 -> conv_0 (type: nn.Conv2d) -> obs_0 -> x1
w -/
b -/
graph_b: x0 -> quant_0 -> qconv_0 (type: nnq.Conv2d) -> dequant_0 -> x1
packed_params_0 -/
This function will return the following result:
{
'conv_0': ( # the name of the node in graph_b
(conv_0, conv_0), # (start_node_a, end_node_a)
(qconv_0, qconv_0), # (start_node_b, end_node_b)
),
}
Or, if we have a fusion pattern,
graph_a: x0 -> linear_0 -> relu_0 -> obs_0 -> x1
w -/
b -/
graph_b: x0 -> quant_0 -> linear_relu_0 -> dequant_0 -> x1
packed_params_0 -/
This function will return the following result:
{
'linear_relu_0': ( # the name of the node in graph_b
(linear_0, relu_0), # (start_node_a, end_node_a)
(linear_relu_0, linear_relu_0), # (start_node_b, end_node_b)
),
}
"""
if unmatchable_types_map is None:
unmatchable_types_map = get_unmatchable_types_map()
non_matchable_functions = unmatchable_types_map['funs_unmatchable']
non_matchable_modules = unmatchable_types_map['mods_unmatchable']
non_matchable_methods = unmatchable_types_map['meths_unmatchable']
graph_a_iterator = _NSGraphMatchableSubgraphsIterator(
gm_a, non_matchable_functions, non_matchable_modules,
non_matchable_methods)
graph_b_iterator = _NSGraphMatchableSubgraphsIterator(
gm_b, non_matchable_functions, non_matchable_modules,
non_matchable_methods)
results = collections.OrderedDict()
if base_name_to_sets_of_related_ops is None:
base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
type_a_related_to_b = \
get_type_a_related_to_b(base_name_to_sets_of_related_ops)
existing_names_a: Set[str] = set()
existing_names_b: Set[str] = set()
while True:
# fetch the next subgraphs from a and b
cur_subgraph_a, cur_subgraph_b = None, None
try:
cur_subgraph_a = next(graph_a_iterator)
except StopIteration:
pass
try:
cur_subgraph_b = next(graph_b_iterator)
except StopIteration:
pass
# look up types of a and b for useful error messages
type_start_a, type_start_b = None, None
if cur_subgraph_a is not None:
type_start_a = _get_node_target_type(cur_subgraph_a.start_node, gm_a)
if cur_subgraph_b is not None:
type_start_b = _get_node_target_type(cur_subgraph_b.start_node, gm_b)
# check for results and determine what to do next
if cur_subgraph_a is not None and cur_subgraph_b is not None:
# both nodes were fetched, check for subgraph_relationship
# note: subgraph_relationship is checked on the start node, i.e.
# if a linear-relu pattern is checked, we would check for subgraph_relationship
# of the linear
subgraph_relationship = _get_subgraph_relationship_type(
cur_subgraph_a, cur_subgraph_b,
gm_a, gm_b, type_a_related_to_b)
if subgraph_relationship == SubgraphTypeRelationship.NOT_RELATED:
msg = f"""
The subgraphs
({cur_subgraph_a}, {type_start_a}) and
({cur_subgraph_b}, {type_start_b})
are not related. Please ensure that the two models you pass in have the same number
of subgraphs, and each pair of subgraphs is related to each other."""
raise GraphMatchingException(msg)
            elif subgraph_relationship == SubgraphTypeRelationship.EQUAL_BUT_UNKNOWN:
# skip matching but unknown types
continue
key_name_a = _get_name_for_subgraph(
cur_subgraph_a, gm_a, base_name_to_sets_of_related_ops,
existing_names_a)
key_name_b = _get_name_for_subgraph(
cur_subgraph_b, gm_b, base_name_to_sets_of_related_ops,
existing_names_b)
assert key_name_a == key_name_b, \
f"Subgraph names {key_name_a} and {key_name_b} do not match"
results[key_name_a] = (cur_subgraph_a, cur_subgraph_b)
continue
elif cur_subgraph_a is None and cur_subgraph_b is None:
# we reached the end of both graphs
break
else:
# only one node was fetched, no match possible, throw error
msg = f"""
Attempting to match
({cur_subgraph_a}, {type_start_a}) and
({cur_subgraph_b}, {type_start_b}),
one of which is empty. Please ensure that the two models you pass in have the same number
of subgraphs."""
raise GraphMatchingException(msg)
# The subgraph pairs are originally created by traversing the two graphs
# from the outputs to the inputs. Reverse the results to return the
# subgraphs in their order of execution.
results = collections.OrderedDict(reversed(list(results.items())))
return results
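# Illustrative usage sketch (hypothetical names _DemoModel and
# _demo_get_matching_subgraph_pairs): match a traced fp32 model against a
# second traced copy of itself. Real workflows pass a prepared/fp32 model and
# its converted counterpart; matching a model against a copy of itself is only
# meant to show the shape of the returned dict.
def _demo_get_matching_subgraph_pairs():
    import torch.nn as nn
    from torch.fx import symbolic_trace
    class _DemoModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(4, 4)
        def forward(self, x):
            return self.fc(x)
    gm_a = symbolic_trace(_DemoModel())
    gm_b = symbolic_trace(_DemoModel())
    results = get_matching_subgraph_pairs(gm_a, gm_b)
    # keys look like 'base_op_torch.nn.functional.linear_0' (the exact base
    # name depends on the related-ops mapping); values are
    # (subgraph_from_a, subgraph_from_b) NSSubgraph pairs
    return results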
|
pytorch-master
|
torch/ao/ns/fx/graph_matcher.py
|
pytorch-master
|
torch/ao/ns/fx/__init__.py
|
|
import enum
import operator
import torch
import torch.nn as nn
import torch.nn.intrinsic.quantized as nniq
import torch.nn.quantized as nnq
toq = torch.ops.quantized
from typing import Tuple, Callable, Dict, Set, List, Optional, Union
from torch.fx import GraphModule
from torch.fx.graph import Node
from torch.ao.quantization import (
ObserverBase,
FakeQuantizeBase,
)
from torch.ao.quantization.utils import getattr_from_fqn
from torch.ao.quantization.quantize import is_activation_post_process
from .ns_types import NSNodeTargetType, NSResultsType
# TODO(future PR): consider deleting this enum and using the torch types
# directly. This might be tricky because it is not a one to one mapping.
class NodeInputOrOutputType(enum.Enum):
FP32 = enum.auto() # torch.float
INT8 = enum.auto() # torch.qint8 or torch.quint8
FP16 = enum.auto() # torch.float16
UNKNOWN = enum.auto() # we cannot determine input/output dtype
# TODO(future PR): while these functions can support multiple dtypes,
# for the purposes of numerical debugging we want to get the actual
# dtype used in the model. We will likely need some kind of dtype
# propagation to estimate this.
FP32_OR_INT8 = enum.auto() # either torch.float or torch.quint8 or torch.qint8
# TODO(future PRs): dynamic quant, fake quant, etc
def get_node_first_input_and_output_type(
node: Node,
gm: GraphModule,
logger_cls: Callable,
node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
) -> Tuple[NodeInputOrOutputType, NodeInputOrOutputType]:
# TODO(future PR): clean this up
FUNS_IO_TYPE_FP32 = node_type_to_io_type_map["funs_io_type_fp32"]
FUNS_IO_TYPE_FP16 = node_type_to_io_type_map["funs_io_type_fp16"]
FUNS_IO_TYPE_INT8 = node_type_to_io_type_map["funs_io_type_int8"]
FUNS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["funs_io_type_fp32_or_int8"]
MODS_IO_TYPE_FP32 = node_type_to_io_type_map["mods_io_type_fp32"]
MODS_IO_TYPE_INT8 = node_type_to_io_type_map["mods_io_type_int8"]
MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["mods_io_type_fp32_or_int8"]
METHS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["meths_io_type_fp32_or_int8"]
if node.op == "call_function":
if node.target in FUNS_IO_TYPE_FP32:
return (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32)
if node.target in FUNS_IO_TYPE_FP16:
return (NodeInputOrOutputType.FP16, NodeInputOrOutputType.FP16)
elif node.target in FUNS_IO_TYPE_INT8:
return (NodeInputOrOutputType.INT8, NodeInputOrOutputType.INT8)
elif node.target in FUNS_IO_TYPE_FP32_OR_INT8:
first_arg = get_normalized_nth_input(node, gm, 0)
assert isinstance(first_arg, Node)
(
_prev_node_input_type,
prev_node_output_type,
) = get_node_first_input_and_output_type(
first_arg, gm, logger_cls, node_type_to_io_type_map
)
return (prev_node_output_type, prev_node_output_type)
else:
return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)
elif node.op == "call_module":
assert node.op == "call_module"
assert isinstance(node.target, str)
mod = getattr_from_fqn(gm, node.target)
is_known_fp32_or_int8_input_module = any(
isinstance(mod, target_type) for target_type in MODS_IO_TYPE_FP32_OR_INT8 # type: ignore[arg-type]
)
if (
isinstance(mod, (logger_cls, ObserverBase, FakeQuantizeBase)) # type: ignore[arg-type]
or is_known_fp32_or_int8_input_module
):
# A logger or observer's input and output type is the output
# type of the preceding node.
first_arg = get_normalized_nth_input(node, gm, 0)
assert isinstance(first_arg, Node)
(
_prev_node_input_type,
prev_node_output_type,
) = get_node_first_input_and_output_type(
first_arg, gm, logger_cls, node_type_to_io_type_map
)
return (prev_node_output_type, prev_node_output_type)
is_known_fp32_input_module = any(
isinstance(mod, target_type) for target_type in MODS_IO_TYPE_FP32 # type: ignore[arg-type]
)
is_known_int8_input_module = any(
isinstance(mod, target_type) for target_type in MODS_IO_TYPE_INT8 # type: ignore[arg-type]
)
if is_known_fp32_input_module:
return (NodeInputOrOutputType.FP32, NodeInputOrOutputType.FP32)
elif is_known_int8_input_module:
return (NodeInputOrOutputType.INT8, NodeInputOrOutputType.INT8)
else:
return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)
elif node.op == "call_method":
if node.target == "dequantize":
# Dequantize is a special node because it allows multiple input types.
# So, we look up the output type of the previous node and return that
# as the input type of this node instance.
prev_node = get_normalized_nth_input(node, gm, 0)
assert isinstance(prev_node, Node)
(
_prev_node_input_type,
prev_node_output_type,
) = get_node_first_input_and_output_type(
prev_node, gm, logger_cls, node_type_to_io_type_map
)
return (prev_node_output_type, NodeInputOrOutputType.FP32)
elif node.target == "to":
# to is a special node because it allows multiple input types.
# So, we look up the output type of the previous node and return that
# as the input type of this node instance. We also look up the target
# of to and return the correct output type.
prev_node = get_normalized_nth_input(node, gm, 0)
assert isinstance(prev_node, Node)
(
_prev_node_input_type,
prev_node_output_type,
) = get_node_first_input_and_output_type(
prev_node, gm, logger_cls, node_type_to_io_type_map
)
cur_node_dtype_target = get_normalized_nth_input(node, gm, 1)
assert (
cur_node_dtype_target is torch.float16
), f"{cur_node_dtype_target} handling needs to be added"
return (prev_node_output_type, NodeInputOrOutputType.FP16)
elif node.target in METHS_IO_TYPE_FP32_OR_INT8:
first_arg = get_normalized_nth_input(node, gm, 0)
assert isinstance(first_arg, Node)
(
_prev_node_input_type,
prev_node_output_type,
) = get_node_first_input_and_output_type(
first_arg, gm, logger_cls, node_type_to_io_type_map
)
return (prev_node_output_type, prev_node_output_type)
return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)
else:
return (NodeInputOrOutputType.UNKNOWN, NodeInputOrOutputType.UNKNOWN)
def get_node_input_qparams(
node: Node,
gm: GraphModule,
node_type_to_io_type_map: Dict[str, Set[NSNodeTargetType]],
) -> Optional[Tuple[Union[torch.Tensor, float], Union[torch.Tensor, int]]]:
"""
Returns the qparams (scale, zero_point) of the first input to `node`,
if they can be inferred from the graph.
"""
prev_node = get_normalized_nth_input(node, gm, 0)
if not isinstance(prev_node, Node):
return None
MODS_IO_TYPE_FP32_OR_INT8 = node_type_to_io_type_map["mods_io_type_fp32_or_int8"]
def _get_scale_zp_from_function_args(node, gm, scale_arg_idx, zp_arg_idx):
scale_node = get_normalized_nth_input(node, gm, scale_arg_idx)
zp_node = get_normalized_nth_input(node, gm, zp_arg_idx)
assert isinstance(scale_node, Node) and isinstance(scale_node.target, str)
assert isinstance(zp_node, Node) and isinstance(zp_node.target, str)
scale_obj = getattr_from_fqn(gm, scale_node.target)
zp_obj = getattr_from_fqn(gm, zp_node.target)
return (scale_obj, zp_obj)
if prev_node.op == "call_function":
# quantize - read the args directly
if prev_node.target == torch.quantize_per_tensor:
return _get_scale_zp_from_function_args(prev_node, gm, 1, 2)
elif prev_node.target in (toq.add, toq.add_relu, toq.mul, toq.mul_relu):
return _get_scale_zp_from_function_args(prev_node, gm, 2, 3)
return None
# TODO(future PR): handle more functionals
# TODO(future PR): handle functional ops which inherit qparams from input
elif prev_node.op == "call_module":
# get type of the module
assert isinstance(prev_node.target, str)
module_obj = getattr_from_fqn(gm, prev_node.target)
if isinstance(
module_obj,
(
nnq.Linear,
nnq.Conv1d,
nnq.Conv2d,
nniq.ConvReLU2d,
nnq.Conv3d,
nnq.BatchNorm2d,
nnq.BatchNorm3d,
nnq.ConvTranspose1d,
nnq.ConvTranspose2d,
nnq.ELU,
nnq.GroupNorm,
nnq.InstanceNorm1d,
nnq.InstanceNorm2d,
nnq.InstanceNorm3d,
nnq.LayerNorm,
nnq.Hardswish,
nnq.LeakyReLU,
nnq.ReLU6,
nniq.BNReLU2d,
nniq.BNReLU3d,
nniq.ConvReLU1d,
nniq.ConvReLU2d,
nniq.ConvReLU3d,
nniq.LinearReLU,
),
):
return (module_obj.scale, module_obj.zero_point) # type: ignore[return-value]
is_known_fp32_or_int8_input_module = any(
isinstance(module_obj, target_type) for target_type in MODS_IO_TYPE_FP32_OR_INT8 # type: ignore[arg-type]
)
if is_known_fp32_or_int8_input_module:
return get_node_input_qparams(prev_node, gm, node_type_to_io_type_map)
return None
def return_first_non_observer_node(
node: Node,
gm: GraphModule,
) -> Node:
"""
If node is not an observer, returns it. If node is an observer,
navigates up the graph and returns the first parent which is not an
observer. For example,
graph: (node_non_obs), node = node_non_obs : returns node_non_obs
graph: (node_non_obs -> obs0), node = obs0 : returns node_non_obs
graph: (node_non_obs -> obs0 -> fq0), node = fq0 : returns node_non_obs
"""
if node.op == "call_module":
node_obj = getattr_from_fqn(gm, node.target) # type: ignore[arg-type]
if is_activation_post_process(node_obj):
assert len(node.args) == 1
assert isinstance(node.args[0], Node)
node = node.args[0]
# code duplication intended, not worth refactoring
assert isinstance(node.target, str)
node_obj = getattr_from_fqn(gm, node.target)
if is_activation_post_process(node_obj):
assert len(node.args) == 1
assert isinstance(node.args[0], Node)
node = node.args[0]
return node
def get_number_of_non_param_args(
node: Node,
gm: GraphModule,
) -> int:
"""
Assumes that all non-param args occur first. Returns the number of
non-param args expected for a node. For example, for
F.linear(x, weight, bias)
Returns 1, because x is a non-param arg and weight and bias are params.
For
lstm_mod(x, hid)
Returns 2, because both x and hid are non-param args.
"""
if node.op == "call_module":
node_obj = getattr_from_fqn(gm, node.target) # type: ignore[arg-type]
if isinstance(node_obj, nn.LSTM):
return 2
# default is 1
return 1
def get_arg_indices_of_inputs_to_log(node: Node) -> List[int]:
"""
Returns the indices of args of the node which we should attach
loggers to, if input logging is enabled.
For example,
* for (x + y), returns [0, 1]
* for (1 + y), returns [1]
* for (x + 1), returns [0]
* for (linear(x, w, b)) returns [0]
* by default, returns [0]
"""
if len(node.args) == 0:
return []
if node.op == "call_function" and (
# TODO(future PR): use relationship map instead of hardcoding
node.target in (torch.add, torch.ops.quantized.add, operator.add)
or node.target in (torch.mul, torch.ops.quantized.mul, operator.mul)
):
result = []
for i in range(2):
if type(node.args[i]) == Node:
result.append(i)
return result
return [0]
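# Illustrative-only sketch (not part of the original module): trace a small
# function with torch.fx and check which arg indices would get input loggers
# for each add node. Assumes the default torch.fx symbolic tracer.
def _example_get_arg_indices_of_inputs_to_log():
    from torch.fx import symbolic_trace
    def f(x, y):
        return (x + y) + 1
    gm = symbolic_trace(f)
    add_nodes = [n for n in gm.graph.nodes if n.op == "call_function"]
    # (x + y) has two Node args -> [0, 1]; (... + 1) has one Node arg -> [0]
    return [get_arg_indices_of_inputs_to_log(n) for n in add_nodes]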
def get_target_type_str(node: Node, gm: GraphModule) -> str:
"""
Returns a string representation of the type of the function or module
pointed to by this node, or '' for other node types.
"""
target_type = ""
if node.op in ("call_function", "call_method"):
target_type = torch.typename(node.target)
elif node.op == "call_module":
assert isinstance(node.target, str)
target_mod = getattr_from_fqn(gm, node.target)
target_type = torch.typename(target_mod)
return target_type
def rekey_logger_info_on_node_name_of_model(
results: NSResultsType,
model_name: str,
) -> NSResultsType:
"""
Rekeys the layer name of a results dictionary to use node names
from `model_name`.
For example, transforms
{'base_op_1_0': {'node_output': {'model_a':
[{'ref_node_name': 'linear1', ...}]}}}
into
{'linear1': {'node_output': {'model_a':
[{'ref_node_name': 'linear1', ...}]}}}
Note: we cannot use these node names directly because they are not
guaranteed to be consistent across models. This is why we extract
the results first and rekey afterwards.
"""
new_results = {}
for old_layer_name, result_type_to_results in results.items():
new_layer_name = None
for _result_type, model_name_to_results in result_type_to_results.items():
for cur_model_name, list_of_results in model_name_to_results.items():
if cur_model_name == model_name:
assert len(list_of_results)
new_layer_name = list_of_results[0]["ref_node_name"]
else:
continue
if new_layer_name is not None:
new_results[new_layer_name] = result_type_to_results
else:
new_results[old_layer_name] = result_type_to_results
return new_results
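# Illustrative-only sketch (not part of the original module) of the rekeying
# described above, using a toy results dict.
def _example_rekey_logger_info_on_node_name_of_model():
    results = {
        "base_op_1_0": {
            "node_output": {
                "model_a": [{"ref_node_name": "linear1", "values": []}],
                "model_b": [{"ref_node_name": "linear1_q", "values": []}],
            },
        },
    }
    rekeyed = rekey_logger_info_on_node_name_of_model(results, "model_a")
    return list(rekeyed.keys())  # ['linear1']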
def maybe_add_missing_fqns(results: NSResultsType) -> None:
"""
If `fqn` entries are filled in for one of the models in `results`, copies
them over to any models which do not have them filled out.
A common use case benefitting from this is comparing a model prepared by
quantization to a quantized model. In this case, the model prepared by
quantization would have `fqn` entries, and the quantized model would not.
"""
# Check in the first result to find any model with fqn entries defined.
model_name_with_fqns = None
for layer_name, result_type_to_results in results.items():
for result_type, model_name_to_results in result_type_to_results.items():
for model_name, model_results in model_name_to_results.items():
if len(model_results) > 0:
if model_results[0]["fqn"] is not None:
model_name_with_fqns = model_name
break
break
break
if model_name_with_fqns:
for layer_name, result_type_to_results in results.items():
for result_type, model_name_to_results in result_type_to_results.items():
ref_model_results = model_name_to_results[model_name_with_fqns]
for model_name, model_results in model_name_to_results.items():
if model_name == model_name_with_fqns:
continue
for i in range(len(model_results)):
fqn = ref_model_results[i]["fqn"]
model_results[i]["fqn"] = fqn
def maybe_dequantize_first_two_tensor_args_and_handle_tuples(f):
def inner(*args, **kwargs):
a0, a1, *a_other = args
if (isinstance(a0, tuple) and isinstance(a1, tuple)) or (
isinstance(a0, list) and isinstance(a1, list)
):
results = []
for el0, el1 in zip(a0, a1):
new_args = (el0, el1, *a_other)
results.append(inner(*new_args, **kwargs))
return results
elif isinstance(a0, torch.Tensor) and isinstance(a1, torch.Tensor):
if a0.is_quantized:
a0 = a0.dequantize()
if a1.is_quantized:
a1 = a1.dequantize()
# for the purposes of this util, only handle floats
if a0.dtype != torch.float or a1.dtype != torch.float:
return None
new_args = (a0, a1, *a_other)
return f(*new_args, **kwargs)
return inner
@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_sqnr(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
Computes the SQNR between `x` and `y`.
Args:
x: Tensor or tuple of tensors
y: Tensor or tuple of tensors
Return:
float or tuple of floats
"""
Ps = torch.norm(x)
Pn = torch.norm(x - y)
return 20 * torch.log10(Ps / Pn)
@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_normalized_l2_error(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
Computes the normalized L2 error between `x` and `y`.
Args:
x: Tensor or tuple of tensors
y: Tensor or tuple of tensors
Return:
float or tuple of floats
"""
return torch.sqrt(((x - y) ** 2).sum() / (x ** 2).sum())
@maybe_dequantize_first_two_tensor_args_and_handle_tuples
def compute_cosine_similarity(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
Computes the cosine similarity between `x` and `y`.
Args:
x: Tensor or tuple of tensors
y: Tensor or tuple of tensors
Return:
float or tuple of floats
"""
# For convolutions, the shape of the quantized weight has one additional
# dimension compared to the shape of the fp32 weight. Match the shapes
# to enable cosine similarity comparison.
x = x.reshape(1, -1)
y = y.reshape(1, -1)
return torch.nn.functional.cosine_similarity(x, y)
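# Illustrative-only sketch (not part of the original module): compare a tensor
# to a slightly perturbed copy with the three comparison functions above.
def _example_compare_tensors():
    x = torch.randn(8, 8)
    y = x + 0.01 * torch.randn(8, 8)
    sqnr = compute_sqnr(x, y)                # higher means y is closer to x
    nl2 = compute_normalized_l2_error(x, y)  # lower means y is closer to x
    cos = compute_cosine_similarity(x, y)    # closer to 1.0 means more similar
    return sqnr, nl2, cos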
def op_type_supports_shadowing(node: Node) -> bool:
if node.op == 'call_function':
if node.target in (torch.add, torch.mul, operator.add, operator.mul, torch.cat, torch.stack):
# shadowing for ops with multiple tensor inputs is not implemented yet
return False
return True
def get_normalized_nth_input(node: Node, gm: GraphModule, idx: int) -> Node:
"""
Given a node, gets the n'th input to that node, normalizing
args and kwargs to the best of its ability.
"""
try:
norm_args_and_kwargs = node.normalized_arguments(
gm, normalize_to_only_use_kwargs=True)
if norm_args_and_kwargs is not None:
norm_args, norm_kwargs = norm_args_and_kwargs
assert len(norm_args) + len(norm_kwargs) > idx
if idx < len(norm_args):
return norm_args[idx]
else:
# note: in Python 3.7+ dicts are ordered
return list(norm_kwargs.values())[idx]
else:
assert len(node.args) + len(node.kwargs) > idx
if idx < len(node.args):
return node.args[idx] # type: ignore[return-value]
else:
kwargs_idx = idx + len(node.args)
return list(node.kwargs.values())[kwargs_idx] # type: ignore[return-value]
except RuntimeError:
# this RuntimeError happens when node argument normalization
# requires typehints to proceed, such as for torch.add where
# either the first, second or both arguments could be tensors
assert len(node.args) + len(node.kwargs) > idx
if idx < len(node.args):
return node.args[idx] # type: ignore[return-value]
else:
kwargs_idx = idx + len(node.args)
return list(node.kwargs.values())[kwargs_idx] # type: ignore[return-value]
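# Illustrative-only sketch (not part of the original module): fetch the first
# input of the linear node in a tiny traced module, whether it was passed
# positionally or as a keyword argument. Assumes the default torch.fx tracer.
def _example_get_normalized_nth_input():
    from torch.fx import symbolic_trace
    class _M(nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(4, 4)
        def forward(self, x):
            return self.linear(x)
    gm = symbolic_trace(_M())
    linear_node = next(n for n in gm.graph.nodes if n.op == "call_module")
    return get_normalized_nth_input(linear_node, gm, 0)  # the placeholder `x`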
|
pytorch-master
|
torch/ao/ns/fx/utils.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
toq = torch.ops.quantized
from torch.fx import GraphModule
from torch.fx.graph import Node
from torch.ao.quantization.utils import getattr_from_fqn
from .ns_types import NSNodeTargetType
from torch.ao.quantization.fx.backend_config_utils import get_native_quant_patterns
from torch.ao.quantization import (
ObserverBase,
FakeQuantizeBase,
)
from typing import Dict, Tuple, Set, Callable, Any, Union, List
def get_type_a_related_to_b(
base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
) -> Set[Tuple[NSNodeTargetType, NSNodeTargetType]]:
# TODO(future PR): allow customizations
# TODO(future PR): reuse existing quantization mappings
# TODO(future PR): add the rest of modules and ops here
type_a_related_to_b: Set[Tuple[NSNodeTargetType, NSNodeTargetType]] = set()
for base_name, s in base_name_to_sets_of_related_ops.items():
s_list = list(s)
# add every bidirectional pair
for idx_0 in range(0, len(s_list)):
for idx_1 in range(idx_0, len(s_list)):
type_a_related_to_b.add((s_list[idx_0], s_list[idx_1]))
type_a_related_to_b.add((s_list[idx_1], s_list[idx_0]))
return type_a_related_to_b
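# Illustrative-only sketch (not part of the original module): with one set of
# related ops, the result contains both orderings of every pair as well as the
# self-pairs.
def _example_get_type_a_related_to_b():
    base = {"relu": set([nn.ReLU, F.relu])}
    pairs = get_type_a_related_to_b(base)
    return (nn.ReLU, F.relu) in pairs and (F.relu, nn.ReLU) in pairs  # True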
NSFusionElType = Union[
Callable, # call_function or call_module type, example: F.linear or nn.Conv2d
str, # call_method name, example: "dequantize"
Tuple[str, Any], # call_method name and first argument, example: ("to", torch.float16)
]
NSFusionType = Union[
Tuple[NSFusionElType, NSFusionElType],
Tuple[NSFusionElType, NSFusionElType, NSFusionElType, NSFusionElType],
]
def get_reversed_fusions() -> List[Tuple[NSFusionType, int]]:
"""
Set of potential fusions, in reverse order. The order is reversed
to match how fusion patterns are defined in quantization code.
Fusion format:
((fusion_op_0, fusion_op_1), base_op_idx)
Where base_op_idx is the idx of the op we should use to match other related
ops. Note: base_op_idx is specified in non-reverse order, i.e. a base_op_idx
of 0 represents the first op in regular (non-reverse) order, 1 represents the
second op, etc.
"""
results: List[Tuple[NSFusionType, int]] = []
# Possible syntaxes:
# * single op: torch.nn.Conv2d
# * multiple ops: (torch.nn.ReLU, torch.nn.Conv2d)
# For fusions, we only care about patterns composed of multiple ops.
# TODO(future PR): allow customizations from default patterns.
all_quant_patterns = get_native_quant_patterns()
default_base_op_idx = 0
for quant_pattern, _quant_handler in all_quant_patterns.items():
# TODO: this is a temporary hack to flatten the patterns from quantization so
# that it works with the ns matcher function, maybe we should use `is_match`
# in torch.ao.quantization.fx.match_utils to match the patterns
if isinstance(quant_pattern, tuple) and len(quant_pattern) == 2 and \
isinstance(quant_pattern[1], tuple) and len(quant_pattern[1]) == 2:
# flatten the pattern with form (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d))
quant_pattern = (quant_pattern[0], quant_pattern[1][0], quant_pattern[1][1])
# Only patterns of multiple ops are fusions, ignore
        # patterns which contain a single op (they get matched
# without caring about fusions).
if isinstance(quant_pattern, tuple):
results.append((quant_pattern, default_base_op_idx)) # type: ignore[arg-type]
# For each pattern, add additional patterns with observers and
# fake quants at the end.
# TODO(future PR): if needed, implement matching for a node
# having multiple output observers.
for cls in (ObserverBase, FakeQuantizeBase):
if isinstance(quant_pattern, tuple):
new_pattern = (cls, *quant_pattern)
else:
new_pattern = (cls, quant_pattern)
results.append((new_pattern, default_base_op_idx)) # type: ignore[arg-type]
    # After this point, results contains values such as
    # [..., ((torch.nn.ReLU, torch.nn.Conv2d), 0), ...]
# Patterns for matching fp16 emulation are not specified in the quantization
# fusion mappings. For now, define them here.
fp16_em_base_op_idx = 1
patterns_to_add = [
# linear-relu fp16 emulation:
# fp16_to_fp32 -> linear -> relu -> fp32_to_fp16
((("to", torch.float16), F.relu, F.linear, "dequantize"), fp16_em_base_op_idx,),
# Conv-BN fusion (this happens outside of quantization patterns,
# which is why it is defined separately here).
((nn.BatchNorm1d, nn.Conv1d), default_base_op_idx),
((nn.BatchNorm2d, nn.Conv2d), default_base_op_idx),
((nn.BatchNorm3d, nn.Conv3d), default_base_op_idx),
((nn.ReLU, nn.BatchNorm1d, nn.Conv1d), default_base_op_idx),
((nn.ReLU, nn.BatchNorm2d, nn.Conv2d), default_base_op_idx),
((nn.ReLU, nn.BatchNorm3d, nn.Conv3d), default_base_op_idx),
]
for p in patterns_to_add:
results.append(p) # type: ignore[arg-type]
results.append(((ObserverBase, *p[0]), p[1])) # type: ignore[arg-type]
results.append(((FakeQuantizeBase, *p[0]), p[1])) # type: ignore[arg-type]
return results
def end_node_matches_reversed_fusion(
end_node: Node,
reversed_fusion: NSFusionType,
gm: GraphModule,
seen_nodes: Set[Node],
) -> bool:
"""
Returns true if a pattern ending with `end_node` matches
the fusion pattern.
"""
cur_node = end_node
for fusion_idx in range(len(reversed_fusion)):
# each node can only belong to one matched pattern
if cur_node in seen_nodes:
return False
cur_fusion_el = reversed_fusion[fusion_idx]
if cur_node.op == 'call_function':
fusion_el_is_fun = (not isinstance(cur_fusion_el, str)) and \
(not isinstance(cur_fusion_el, type))
if fusion_el_is_fun:
if cur_node.target != cur_fusion_el:
return False
if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node):
cur_node = cur_node.args[0]
else:
return False
else:
return False
elif cur_node.op == 'call_module':
fusion_el_is_mod = isinstance(cur_fusion_el, type)
if fusion_el_is_mod:
assert isinstance(cur_node.target, str)
target_mod = getattr_from_fqn(gm, cur_node.target)
if not isinstance(cur_fusion_el, type):
return False
if not isinstance(target_mod, cur_fusion_el):
return False
if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node):
cur_node = cur_node.args[0]
else:
return False
else:
return False
elif cur_node.op == 'call_method':
fusion_el_is_meth_with_second_arg = \
isinstance(cur_fusion_el, tuple) and len(cur_fusion_el) == 2
fusion_el_is_meth_without_args = isinstance(cur_fusion_el, str)
if fusion_el_is_meth_without_args or fusion_el_is_meth_with_second_arg:
if fusion_el_is_meth_without_args:
if cur_node.target != cur_fusion_el:
return False
else:
assert isinstance(cur_fusion_el, tuple)
if cur_node.target != cur_fusion_el[0]:
return False
elif len(cur_node.args) < 2:
return False
elif cur_node.args[1] != cur_fusion_el[1]:
return False
if len(cur_node.args) > 0 and isinstance(cur_node.args[0], Node):
cur_node = cur_node.args[0]
else:
return False
else:
return False
else:
return False
return True
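# Illustrative-only sketch (not part of the original module): the relu node at
# the end of a conv -> relu chain matches the reversed fusion
# (nn.ReLU, nn.Conv2d). Assumes the default torch.fx symbolic tracer.
def _example_end_node_matches_reversed_fusion():
    from torch.fx import symbolic_trace
    class _M(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(1, 1, 1)
            self.relu = nn.ReLU()
        def forward(self, x):
            return self.relu(self.conv(x))
    gm = symbolic_trace(_M())
    relu_node = [n for n in gm.graph.nodes if n.op == "call_module"][-1]
    return end_node_matches_reversed_fusion(
        relu_node, (nn.ReLU, nn.Conv2d), gm, set())  # True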
|
pytorch-master
|
torch/ao/ns/fx/pattern_utils.py
|
import enum
from typing import NamedTuple
from torch.fx.graph import Node
from typing import Dict, Any, List, Union, Callable
class NSSingleResultValuesType(str, enum.Enum):
WEIGHT = 'weight'
NODE_OUTPUT = 'node_output'
NODE_INPUT = 'node_input'
NSSubgraph = NamedTuple(
'NSSubgraph',
[('start_node', Node), ('end_node', Node), ('base_op_node', Node)]
)
# TODO(future PR): see if we can use typing_extensions's TypedDict instead
# to properly type the various keys
# {
# # one of NSSingleResultValuesType
# 'type': 'weight',
# # the values of type specified above
# 'values': [torch.tensor(...), ...],
# # name of the node directly before the logger
# 'prev_node_name': 'linear1',
# # type of the underlying function or module
# 'prev_node_target_type': torch.nn.functional.linear # or torch.nn.Linear, etc
# # name of the node responsible for adding this logger
# # Note: this may differ from prev_node_name if we are logging inputs
# 'ref_node_name': 'linear1',
# # index of this node within the arg of the input/output node
# # for example, in cat([x1, x2, x3], dim=0), x2 would have index_within_arg == 1
# 'index_within_arg': 0,
# # index of this node within the args of the input/output node
# # for example, in add(x1, x2), x2 would have index_of_arg == 1
# 'index_of_arg': 0,
# }
NSSingleResultType = Dict[str, Any]
# {
# 'layer_name_1': { # subgraph name
# 'node_output': { # results type (node_output, node_input, weight)
# 'model_name_a': # model name
# [NSSingleResultType, ...], # results, ordered by index_within_arg
# 'model_name_b':
# [NSSingleResultType, ...],
# },
# },
# }
#
NSResultsType = Dict[str, Dict[str, Dict[str, List[NSSingleResultType]]]]
# Defines the underlying target type of a node, for example:
# `F.conv1d` for a `call_function` conv node
# `nn.Conv1d` for a `call_module` node calling the forward of a `nn.Conv1d` module
# `'sigmoid'` for a `call_method` node calling `x.sigmoid()`
NSNodeTargetType = Union[Callable, str]
|
pytorch-master
|
torch/ao/ns/fx/ns_types.py
|
import operator
import torch
import torch.nn as nn
import torch.nn.functional as F
toq = torch.ops.quantized
import torch.nn.quantized as nnq
import torch.nn.quantized.dynamic as nnqd
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized.dynamic as nniqd
import torch.nn.intrinsic.qat as nniqat
import torch.nn.intrinsic as nni
import torch.nn.qat as nnqat
import torch.nn.qat.dynamic as nnqatd
from torch.ao.quantization.backend_config import get_native_backend_config_dict
import torch.ao.quantization.fx._lower_to_native_backend as \
_lower_to_native_backend
import torch.ao.quantization.quantization_mappings as quantization_mappings
from .ns_types import NSNodeTargetType
from typing import Set, Dict, List, Optional
def get_base_name_to_sets_of_related_ops() -> Dict[str, Set[NSNodeTargetType]]:
# note: this set is modified below by items from backend_config_dict
sets_of_related_ops: List[Set[NSNodeTargetType]] = [
# conv modules
set([
nn.Conv1d,
]),
set([
nn.Conv2d,
]),
set([
nn.Conv3d,
]),
# conv functionals
set([
F.conv1d,
]),
set([
F.conv2d,
]),
set([
F.conv3d,
]),
# linear modules
set([
nn.Linear,
]),
# linear functionals
set([
F.linear,
]),
# average pool
set([
nn.AvgPool1d,
torch.avg_pool1d,
]),
set([
nn.AvgPool2d,
torch._C._nn.avg_pool2d,
]),
set([
nn.AvgPool3d,
torch._C._nn.avg_pool3d,
]),
# adaptive average pool
set([
nn.AdaptiveAvgPool1d,
F.adaptive_avg_pool1d,
]),
set([
nn.AdaptiveAvgPool2d,
F.adaptive_avg_pool2d,
]),
set([
nn.AdaptiveAvgPool3d,
F.adaptive_avg_pool3d,
]),
# LSTM
set([
nn.LSTM,
]),
# add
set([
torch.add,
operator.add, # x + y
]),
# cat
set([
torch.cat,
]),
# mul
set([
torch.mul,
operator.mul,
]),
# relu
set([
F.relu,
nn.ReLU,
'relu',
'relu_',
torch.relu,
]),
# maxpool
set([
nn.MaxPool1d,
F.max_pool1d,
]),
set([
nn.MaxPool2d,
F.max_pool2d,
]),
set([
nn.MaxPool3d,
F.max_pool3d,
]),
# sigmoid
set([
torch.sigmoid,
'sigmoid',
'sigmoid_',
nn.Sigmoid,
F.sigmoid,
]),
# BatchNorm
set([
nn.BatchNorm2d,
]),
set([
nn.BatchNorm3d,
]),
# ConvTranspose
set([
nn.ConvTranspose1d,
]),
set([
nn.ConvTranspose2d,
]),
set([
nn.ConvTranspose3d,
]),
# ELU
set([
nn.ELU,
]),
# Embedding
set([
nn.Embedding,
]),
# EmbeddingBag
set([
nn.EmbeddingBag,
]),
# GroupNorm
set([
nn.GroupNorm,
]),
# Hardswish
set([
nn.Hardswish,
]),
# InstanceNorm
set([
nn.InstanceNorm1d,
]),
set([
nn.InstanceNorm2d,
]),
set([
nn.InstanceNorm3d,
]),
# LayerNorm
set([
nn.LayerNorm,
]),
# LeakyReLU
set([
nn.LeakyReLU,
]),
# ReLU6
set([
nn.ReLU6,
F.relu6,
]),
# F.elu
set([
F.elu,
]),
# F.hardswish
set([
F.hardswish,
]),
# F.instance_norm
set([
F.instance_norm,
]),
# F.layer_norm
set([
F.layer_norm,
]),
# F.leaky_relu
set([
F.leaky_relu,
]),
# F.silu
set([
nn.SiLU,
F.silu,
]),
# F.mish
set([
nn.Mish,
F.mish,
]),
# F.tanh
set([
nn.Tanh,
F.tanh,
torch.tanh,
'tanh_',
'tanh',
]),
# F.hardsigmoid
set([
'hardsigmoid_',
'hardsigmoid',
F.hardsigmoid,
nn.Hardsigmoid,
]),
# F.hardtanh
set([
nn.Hardtanh,
F.hardtanh,
F.hardtanh_,
]),
# floordiv
set([
operator.floordiv,
]),
# unsqueeze
set([
torch.unsqueeze,
]),
# stack
set([
torch.stack,
]),
# squeeze
set([
torch.squeeze,
]),
# sort
set([
torch.sort,
]),
# repeat_interleave
set([
torch.repeat_interleave,
]),
# min
set([
torch.min,
]),
# mean
set([
torch.mean,
]),
# max
set([
torch.max,
]),
# transpose
set([
torch.transpose,
]),
# flatten
set([
torch.flatten,
]),
# clamp
set([
torch.clamp,
]),
# chunk
set([
torch.chunk,
]),
# interpolate
set([
torch.nn.functional.interpolate,
]),
# dropout
set([
nn.Dropout,
]),
# F.dropout
set([
F.dropout,
]),
# matmul
set([
torch.matmul,
]),
# Softmax
set([
nn.Softmax,
]),
# PReLU
set([
nn.PReLU,
nnq.PReLU,
]),
# F.prelu
set([
F.prelu,
toq.prelu,
]),
]
# for each floating point op, add versions of the op added by
# backend_config_dict
backend_config_dict = get_native_backend_config_dict()
new_connections = [
# technical debt edge case
(nn.Linear, nn.modules.linear.NonDynamicallyQuantizableLinear),
]
for config in backend_config_dict['configs']:
if 'pattern' not in config:
continue
# format: (c, (b, a))
pattern = config['pattern']
first_element = pattern
# look from the end, because pattern is in reverse order
while isinstance(first_element, (list, tuple)):
first_element = first_element[-1]
if 'fused_module' in config:
# case 1: pattern fuses a pattern of ops into an op
# example: nn.Conv1d, nn.ReLU fused into nni.ConvReLU1d
new_connections.append((first_element, config['fused_module']))
if 'qat_module' in config:
# case 2: pattern swaps a module into a QAT module
# example: nni.ConvReLU1d swapped into nniqat.ConvReLU1d
new_connections.append((first_element, config['qat_module']))
if 'reference_quantized_module_for_root' in config:
# case 3: reference version of floating point module, such as
# nn.Conv2d and nnqr.Conv2d
new_connections.append(
(first_element, config['reference_quantized_module_for_root'])
)
#
# Add reference module swaps from default lowering path
#
for source_to_target in (
_lower_to_native_backend.STATIC_LOWER_MODULE_MAP,
_lower_to_native_backend.DYNAMIC_LOWER_MODULE_MAP,
_lower_to_native_backend.WEIGHT_ONLY_LOWER_MODULE_MAP,
_lower_to_native_backend.SPECIAL_PATTERN_LOWER_MODULE_MAP,
):
for source, target in source_to_target.items(): # type: ignore[attr-defined]
new_connections.append((source, target))
for source_to_double_target in (
_lower_to_native_backend.STATIC_LOWER_FUSED_MODULE_MAP,
_lower_to_native_backend.DYNAMIC_LOWER_FUSED_MODULE_MAP,
):
for source, (target1, target2) in source_to_double_target.items(): # type: ignore[attr-defined]
new_connections.append((source, target1))
new_connections.append((source, target2))
#
# Add function swaps from default lowering path
#
for source, (target1, target2) in \
_lower_to_native_backend.STATIC_LOWER_FUNCTIONAL_MAP.items():
new_connections.append((source, target1))
new_connections.append((source, target2))
for source_to_target in (
_lower_to_native_backend.QBIN_OP_MAPPING,
_lower_to_native_backend.QBIN_RELU_OP_MAPPING,
quantization_mappings.DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS,
):
for source, target in source_to_target.items():
new_connections.append((source, target))
#
# Add other swaps, ideally in the future this could be removed
# after the lowering code stops using these.
#
for source_to_target in (
quantization_mappings.DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS,
):
for source, target in source_to_target.items():
new_connections.append((source, target))
# add the new connections from backend_config_dict
for item1, item2 in new_connections:
for set_of_related_ops in sets_of_related_ops:
if item1 in set_of_related_ops or item2 in set_of_related_ops:
set_of_related_ops.add(item1)
set_of_related_ops.add(item2)
break
base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]] = {}
counter = 0
for set_of_related_ops in sets_of_related_ops:
base_name = str(counter)
counter += 1
base_name_to_sets_of_related_ops[base_name] = set_of_related_ops
return base_name_to_sets_of_related_ops
def get_base_name_for_op(
base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
op: NSNodeTargetType,
) -> Optional[str]:
for base_name, set_of_related_ops in base_name_to_sets_of_related_ops.items():
if op in set_of_related_ops:
return base_name
return None
def add_op_to_sets_of_related_ops(
base_name_to_sets_of_related_ops: Dict[str, Set[NSNodeTargetType]],
op: NSNodeTargetType,
related_op: Optional[NSNodeTargetType],
) -> None:
if related_op is not None:
for base_name, set_of_related_ops in base_name_to_sets_of_related_ops.items():
if related_op in set_of_related_ops:
set_of_related_ops.add(op)
return
# if we got here, related_op was not found
raise AssertionError(f"{related_op} was not found")
else:
counter = 0
while str(counter) in base_name_to_sets_of_related_ops:
counter += 1
base_name_to_sets_of_related_ops[str(counter)] = set([op])
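# Illustrative-only sketch (not part of the original module): register a
# hypothetical user-defined sigmoid wrapper as related to torch.sigmoid so the
# NS tooling can pair the two across models.
def _example_add_op_to_sets_of_related_ops():
    base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
    def _my_sigmoid(x):  # hypothetical custom op, for illustration only
        return torch.sigmoid(x)
    add_op_to_sets_of_related_ops(
        base_name_to_sets_of_related_ops, op=_my_sigmoid, related_op=torch.sigmoid)
    # _my_sigmoid now shares a base name with torch.sigmoid
    return get_base_name_for_op(base_name_to_sets_of_related_ops, _my_sigmoid)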
# TODO(future PR): clean this up
def get_node_type_to_io_type_map() -> Dict[str, Set[NSNodeTargetType]]:
FUNS_IO_TYPE_FP32: Set[NSNodeTargetType] = set([
F.linear,
F.conv1d,
F.conv2d,
F.conv3d,
torch.cat,
F.elu,
F.hardswish,
F.instance_norm,
F.layer_norm,
F.leaky_relu,
F.dropout,
F.silu,
F.mish,
operator.add,
torch.add,
operator.mul,
torch.mul,
torch.sum,
F.prelu,
])
FUNS_IO_TYPE_FP16: Set[NSNodeTargetType] = set()
FUNS_IO_TYPE_INT8: Set[NSNodeTargetType] = set([
toq.linear,
toq.linear_relu,
toq.conv1d,
toq.conv1d_relu,
toq.conv2d,
toq.conv2d_relu,
toq.conv3d,
toq.conv3d_relu,
toq.cat,
toq.elu,
toq.hardswish,
toq.instance_norm,
toq.layer_norm,
toq.leaky_relu,
toq.dropout,
toq.prelu,
# TODO(future PR): implement shadowing for binary ops and
# uncomment below
# toq.add,
# toq.mul,
])
FUNS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = set([
F.relu,
F.tanh,
torch.tanh,
F.sigmoid,
torch.sigmoid,
F.hardsigmoid,
operator.floordiv,
torch.adaptive_avg_pool1d,
F.adaptive_avg_pool2d,
F.adaptive_avg_pool3d,
F.dropout,
F.hardtanh,
F.hardtanh_,
F.interpolate,
F.max_pool1d,
F.max_pool2d,
F.max_pool3d,
F.relu6,
torch.avg_pool1d,
torch._C._nn.avg_pool2d,
torch._C._nn.avg_pool3d,
torch.cat,
torch.chunk,
torch.clamp,
torch.flatten,
torch.transpose,
torch.max,
torch.mean,
torch.min,
torch.repeat_interleave,
torch.sort,
torch.squeeze,
torch.stack,
torch.unsqueeze,
operator.add,
])
MODS_IO_TYPE_FP32: Set[NSNodeTargetType] = set([
nn.Linear,
nnqat.Linear,
nnqatd.Linear,
nnqd.Linear,
torch.nn.modules.linear.NonDynamicallyQuantizableLinear,
nn.Conv1d,
nn.Conv2d,
nn.Conv3d,
nnqat.Conv1d,
nnqat.Conv2d,
nnqat.Conv3d,
nnqat.Embedding,
nnqat.EmbeddingBag,
nn.LSTM,
# note: nnqd.Linear is an instance of nnq.Linear, so this
# check has to happen before the int8 module check
nnqd.LSTM,
nn.BatchNorm2d,
nn.BatchNorm3d,
nn.Dropout,
nn.ConvTranspose1d,
nn.ConvTranspose2d,
nn.ConvTranspose3d,
nn.ELU,
nn.GroupNorm,
nn.InstanceNorm1d,
nn.InstanceNorm2d,
nn.InstanceNorm3d,
nn.LayerNorm,
nn.Hardswish,
nn.LeakyReLU,
nn.ReLU6,
nn.SiLU,
nn.Mish,
nn.Softmax,
nn.PReLU,
nni.BNReLU2d,
nni.BNReLU3d,
nni.ConvReLU1d,
nni.ConvReLU2d,
nni.ConvReLU3d,
nni.LinearReLU,
nni.LinearBn1d,
nni.ConvBn1d,
nni.ConvBn2d,
nni.ConvBn3d,
nniqat.ConvBn1d,
nniqat.ConvBn2d,
nniqat.ConvBn3d,
nniqat.ConvBnReLU1d,
nniqat.ConvBnReLU2d,
nniqat.ConvBnReLU3d,
nniqat.ConvReLU1d,
nniqat.ConvReLU2d,
nniqat.ConvReLU3d,
nniqat.LinearReLU,
nniqat.LinearBn1d,
nniqd.LinearReLU,
])
MODS_IO_TYPE_INT8: Set[NSNodeTargetType] = set([
nnq.Linear,
nnq.Conv1d,
nnq.Conv2d,
nnq.Conv3d,
nnq.BatchNorm2d,
nnq.BatchNorm3d,
nnq.Dropout,
nnq.ConvTranspose1d,
nnq.ConvTranspose2d,
nnq.ELU,
nnq.InstanceNorm1d,
nnq.InstanceNorm2d,
nnq.InstanceNorm3d,
nnq.LayerNorm,
nnq.Hardswish,
nnq.LeakyReLU,
nnq.Embedding,
nnq.EmbeddingBag,
nnq.Dropout,
nnq.Softmax,
nnq.PReLU,
nniq.BNReLU2d,
nniq.BNReLU3d,
nniq.ConvReLU1d,
nniq.ConvReLU2d,
nniq.ConvReLU3d,
nniq.LinearReLU,
])
MODS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = set([
nn.ReLU,
nn.Tanh,
nn.Sigmoid,
nn.Hardsigmoid,
nn.AdaptiveAvgPool1d,
nn.AdaptiveAvgPool2d,
nn.AdaptiveAvgPool3d,
nn.AvgPool1d,
nn.AvgPool2d,
nn.AvgPool3d,
nn.Dropout,
nn.Hardtanh,
nn.Identity,
nn.MaxPool1d,
nn.MaxPool2d,
nn.MaxPool3d,
nn.ReLU6,
])
METHS_IO_TYPE_FP32_OR_INT8: Set[NSNodeTargetType] = set([
'sigmoid_',
'sigmoid',
'tanh_',
'tanh',
'hardsigmoid_',
'hardsigmoid',
'relu_',
'relu',
])
return {
'funs_io_type_fp32': FUNS_IO_TYPE_FP32,
'funs_io_type_fp16': FUNS_IO_TYPE_FP16,
'funs_io_type_int8': FUNS_IO_TYPE_INT8,
'funs_io_type_fp32_or_int8': FUNS_IO_TYPE_FP32_OR_INT8,
'mods_io_type_fp32': MODS_IO_TYPE_FP32,
'mods_io_type_int8': MODS_IO_TYPE_INT8,
'mods_io_type_fp32_or_int8': MODS_IO_TYPE_FP32_OR_INT8,
'meths_io_type_fp32_or_int8': METHS_IO_TYPE_FP32_OR_INT8,
}
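# Illustrative-only sketch (not part of the original module): look up which
# dtype bucket a few ops fall into.
def _example_node_type_to_io_type_lookup():
    io_type_map = get_node_type_to_io_type_map()
    assert F.linear in io_type_map['funs_io_type_fp32']
    assert toq.linear in io_type_map['funs_io_type_int8']
    assert nn.ReLU in io_type_map['mods_io_type_fp32_or_int8']
    return sorted(io_type_map.keys())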
def get_unmatchable_types_map() -> Dict[str, Set[NSNodeTargetType]]:
FUNS_UNMATCHABLE: Set[NSNodeTargetType] = set([
torch.quantize_per_tensor,
operator.getitem,
])
MODS_UNMATCHABLE: Set[NSNodeTargetType] = set([
nn.Identity,
])
METHS_UNMATCHABLE: Set[NSNodeTargetType] = set([
'to',
'dequantize',
'reshape',
'view',
'unsqueeze_',
'unsqueeze',
'transpose',
'squeeze_',
'squeeze',
'size',
'shape',
'resize_',
'repeat_interleave',
'repeat',
'permute',
'numel',
'mean',
'detach_',
'detach',
'contiguous',
'clamp',
'chunk',
])
return {
'funs_unmatchable': FUNS_UNMATCHABLE,
'mods_unmatchable': MODS_UNMATCHABLE,
'meths_unmatchable': METHS_UNMATCHABLE,
}
|
pytorch-master
|
torch/ao/ns/fx/mappings.py
|
"""
This module implements observers which are used to collect statistics about
the values observed during calibration (PTQ) or training (QAT).
"""
import re
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from functools import partial
from typing import Any, List, Tuple, Optional, Dict
import torch
import torch.nn as nn
from torch.ao.quantization.utils import check_min_max_valid, calculate_qmin_qmax
__all__ = [
"default_affine_fixed_qparams_observer",
"default_debug_observer",
"default_dynamic_quant_observer",
"default_fixed_qparams_range_0to1_observer",
"default_fixed_qparams_range_neg1to1_observer",
"default_float_qparams_observer",
"default_float_qparams_observer_4bit",
"default_histogram_observer",
"default_observer",
"default_per_channel_weight_observer",
"default_placeholder_observer",
"default_reuse_input_observer",
"default_symmetric_fixed_qparams_observer",
"default_weight_observer",
"get_observer_state_dict",
"load_observer_state_dict",
"per_channel_weight_observer_range_neg_127_to_127",
"weight_observer_range_neg_127_to_127",
"FixedQParamsObserver",
"HistogramObserver",
"MinMaxObserver",
"MovingAverageMinMaxObserver",
"MovingAveragePerChannelMinMaxObserver",
"NoopObserver",
"ObserverBase",
"PerChannelMinMaxObserver",
"PlaceholderObserver",
"RecordingObserver",
"ReuseInputObserver",
"UniformQuantizationObserverBase",
]
class _PartialWrapper(object):
def __init__(self, p):
self.p = p
self.callable_args = {}
def __call__(self, *args, **keywords):
        # call each arg in callable_args and add the result to the partial call,
        # then run with keywords; skip any arg_name already in keywords so it's possible to overwrite
for arg_name in self.callable_args:
if arg_name not in keywords:
keywords = {**keywords, **{arg_name: self.callable_args[arg_name]()}}
return self.p(*args, **keywords)
def __repr__(self):
return self.p.__repr__() + self.callable_args.__repr__()
def with_args(self, **kwargs):
return _with_args(self, **kwargs)
def with_callable_args(self, **kwargs):
result = _PartialWrapper(p=self.p)
result.callable_args = {**self.callable_args, **kwargs}
return result
def _with_args(cls_or_self, **kwargs):
r"""Wrapper that allows creation of class factories.
This can be useful when there is a need to create classes with the same
constructor arguments, but different instances. Can be used in conjunction with
    _with_callable_args
Example::
>>> # xdoctest: +SKIP("Undefined vars")
>>> Foo.with_args = classmethod(_with_args)
>>> foo_builder = Foo.with_args(a=3, b=4).with_args(answer=42)
>>> foo_instance1 = foo_builder()
>>> foo_instance2 = foo_builder()
>>> id(foo_instance1) == id(foo_instance2)
False
"""
r = _PartialWrapper(partial(cls_or_self, **kwargs))
return r
def _with_callable_args(cls_or_self, **kwargs):
r"""Wrapper that allows creation of class factories args that need to be
called at construction time.
This can be useful when there is a need to create classes with the same
constructor arguments, but different instances and those arguments should only
be calculated at construction time. Can be used in conjunction with _with_args
Example::
>>> # xdoctest: +SKIP("Undefined vars")
>>> Foo.with_callable_args = classmethod(_with_callable_args)
>>> Foo.with_args = classmethod(_with_args)
>>> foo_builder = Foo.with_callable_args(cur_time=get_time_func).with_args(name="dan")
>>> foo_instance1 = foo_builder()
>>> # wait 50
>>> foo_instance2 = foo_builder()
>>> id(foo_instance1.creation_time) == id(foo_instance2.creation_time)
False
"""
r = _PartialWrapper(partial(cls_or_self))
return r.with_callable_args(**kwargs)
ABC: Any = ABCMeta(str("ABC"), (object,), {}) # compatible with Python 2 *and* 3:
class ObserverBase(ABC, nn.Module):
r"""Base observer Module.
Any observer implementation should derive from this class.
Concrete observers should follow the same API. In forward, they will update
the statistics of the observed Tensor. And they should provide a
`calculate_qparams` function that computes the quantization parameters given
the collected statistics.
Args:
dtype: Quantized data type
"""
def __init__(self, dtype):
super(ObserverBase, self).__init__()
self.dtype = dtype
@abstractmethod
def forward(self, x):
pass
@abstractmethod
def calculate_qparams(self, **kwargs):
pass
with_args = classmethod(_with_args)
with_callable_args = classmethod(_with_callable_args)
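# Illustrative-only sketch (not part of the original module) of the contract
# described above: a trivial observer that records nothing and always reports
# scale=1.0 and zero_point=0.
class _ExampleIdentityObserver(ObserverBase):
    def __init__(self, dtype=torch.quint8):
        super().__init__(dtype=dtype)
    def forward(self, x):
        # a real observer would update running statistics here
        return x
    def calculate_qparams(self, **kwargs):
        return torch.tensor([1.0]), torch.tensor([0])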
class UniformQuantizationObserverBase(ObserverBase):
r"""Common base for all observers using uniform quantization to calculate
scale and zero_point.
Args:
dtype: Quantized data type.
qscheme: Quantization scheme to be used.
reduce_range: Reduces the range of the quantized data type by 1 bit.
This is sometimes required to avoid instruction overflow.
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.
.. warning::
:attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``.
.. warning::
:attr:`qscheme` can only take one of the following options:
- ``torch.per_tensor_affine``
- ``torch.per_tensor_symmetric``
- ``torch.per_channel_affine``
- ``torch.per_channel_symmetric``
"""
# Note: the version is shared by all observer types
#
# Version 1/None
# self
#
# Version 2 (base class only, does not include child class buffers)
# self
# |--- eps : Tensor
#
# Version 3
# for HistogramObserver only, changed the shape of uninitialized
# min_val and max_val buffers from torch.Size([0]) to torch.Size([])
# for PerChannelObservers, changed the name of the buffers from min_vals
# to min_val and from max_vals to max_val.
_version = 3
eps: torch.Tensor
def __init__(
self,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
eps=torch.finfo(torch.float32).eps,
) -> None:
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
super().__init__(dtype=dtype)
self.qscheme = qscheme
if reduce_range:
warnings.warn(
"Please use quant_min and quant_max to specify the range for observers. \
reduce_range will be deprecated in a future release of PyTorch."
)
self.reduce_range = reduce_range
self.register_buffer(
"eps", torch.tensor([eps], **factory_kwargs)
)
assert self.qscheme in (
torch.per_tensor_affine,
torch.per_tensor_symmetric,
torch.per_channel_affine,
torch.per_channel_symmetric,
torch.per_channel_affine_float_qparams,
), "Default Observer only works for per_tensor_affine, \
per_tensor_symmetric, per_channel_affine, \
                per_channel_symmetric and per_channel_affine_float_qparams quantization schemes"
assert self.dtype in (
torch.qint8,
torch.quint8,
torch.quint4x2,
torch.qint32,
), "Default Observer only works for qint8, quint8 and quint4x2 data type"
self.has_customized_qrange = (quant_min is not None) and (quant_max is not None)
if self.has_customized_qrange:
self._validate_qmin_qmax(quant_min, quant_max)
self.quant_min, self.quant_max = \
calculate_qmin_qmax(quant_min, quant_max, self.has_customized_qrange, self.dtype, self.reduce_range)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version == 1:
# eps was moved to a buffer in version 2
eps = torch.tensor([torch.finfo(torch.float32).eps])
state_dict[prefix + "eps"] = eps
super(ObserverBase, self)._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
@torch.jit.export
def _validate_qmin_qmax(self, quant_min: int, quant_max: int) -> None:
r"""Validates that the user-specified quantization range is properly initialized
and within the given bound supported by the observer dtype.
To accommodate lower-bit quantization with respect to the existing torch.qint8 and
torch.quint8 datatypes, the user can choose to use dynamic quantization range by passing
        in a tuple of initial qmin and qmax values. One use case is that these customized qmin and qmax
values are used to calculate static estimates of the scale and zero point for aggressive lower-bit
fake quantization. These estimates are compared against parameters learned through backpropagation.
        The related literature on learning scale and zero point via backpropagation is as follows:
Learned Step Size Quantization: https://openreview.net/pdf?id=rkgO66VKDS
Trained Quantization Thresholds: https://arxiv.org/pdf/1903.08066.pdf
"""
        # The user-specified values (quant_min and quant_max) may later be adjusted
        # based on whether the quantization range is reduced and on the datatype (signed/unsigned) used by the observer.
assert (
quant_min <= 0 <= quant_max
), "Used-specified quantization range must include 0."
assert (
quant_min < quant_max
), "qmin must be strictly less than qmax for user-specified quantization range."
@torch.jit.export
def _calculate_qparams(
self, min_val: torch.Tensor, max_val: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Calculates the quantization parameters, given min and max
value tensors. Works for both per tensor and per channel cases
Args:
min_val: Minimum values per channel
max_val: Maximum values per channel
Returns:
scales: Scales tensor of shape (#channels,)
zero_points: Zero points tensor of shape (#channels,)
"""
if not check_min_max_valid(min_val, max_val):
return torch.tensor([1.0], device=min_val.device.type), torch.tensor([0], device=min_val.device.type)
quant_min, quant_max = self.quant_min, self.quant_max
min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
device = min_val_neg.device
scale = torch.ones(min_val_neg.size(), dtype=torch.float32, device=device)
zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)
if (
self.qscheme == torch.per_tensor_symmetric
or self.qscheme == torch.per_channel_symmetric
):
max_val_pos = torch.max(-min_val_neg, max_val_pos)
scale = max_val_pos / (float(quant_max - quant_min) / 2)
scale = torch.max(scale, self.eps)
if self.dtype == torch.quint8:
if self.has_customized_qrange:
# When customized quantization range is used, down-rounded midpoint of the range is chosen.
zero_point = zero_point.new_full(
zero_point.size(), (quant_min + quant_max) // 2
)
else:
zero_point = zero_point.new_full(zero_point.size(), 128)
elif self.qscheme == torch.per_channel_affine_float_qparams:
scale = (max_val - min_val) / float(quant_max - quant_min)
scale = torch.where(scale > self.eps, scale, torch.ones_like(scale))
# We use the quantize function
# xq = Round(Xf * inv_scale + zero_point),
            # setting zero_point to (-1 * min * inv_scale) we get
# Xq = Round((Xf - min) * inv_scale)
zero_point = -1 * min_val / scale
else:
scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)
scale = torch.max(scale, self.eps)
zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)
zero_point = torch.clamp(zero_point, quant_min, quant_max)
# For scalar values, cast them to Tensors of size 1 to keep the shape
# consistent with default values in FakeQuantize.
if len(scale.shape) == 0:
# TODO: switch to scale.item() after adding JIT support
scale = torch.tensor([float(scale)], dtype=scale.dtype, device=device)
if len(zero_point.shape) == 0:
# TODO: switch to zero_point.item() after adding JIT support
zero_point = torch.tensor(
[int(zero_point)], dtype=zero_point.dtype, device=device
)
if self.qscheme == torch.per_channel_affine_float_qparams:
zero_point = torch.tensor(
[float(zero_point)], dtype=zero_point.dtype, device=device
)
return scale, zero_point
@torch.jit.export
def reset_min_max_vals(self):
raise NotImplementedError("Cannot reset min/max values in the given observer.")
# Originally, this class was called `_ObserverBase`. Keeping the old name around
# for backwards compatibility.
# TODO(after v1.13): delete this
_ObserverBase = UniformQuantizationObserverBase
class MinMaxObserver(UniformQuantizationObserverBase):
r"""Observer module for computing the quantization parameters based on the
running min and max values.
This observer uses the tensor min/max statistics to compute the quantization
parameters. The module records the running minimum and maximum of incoming
tensors, and uses this statistic to compute the quantization parameters.
Args:
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.
Given running min/max as :math:`x_\text{min}` and :math:`x_\text{max}`,
    scale :math:`s` and zero point :math:`z` are computed as described below.
    The running minimum/maximum :math:`x_\text{min/max}` is computed as:
.. math::
\begin{array}{ll}
x_\text{min} &= \begin{cases}
\min(X) & \text{if~}x_\text{min} = \text{None} \\
\min\left(x_\text{min}, \min(X)\right) & \text{otherwise}
\end{cases}\\
x_\text{max} &= \begin{cases}
\max(X) & \text{if~}x_\text{max} = \text{None} \\
\max\left(x_\text{max}, \max(X)\right) & \text{otherwise}
\end{cases}\\
\end{array}
where :math:`X` is the observed tensor.
The scale :math:`s` and zero point :math:`z` are then computed as:
.. math::
\begin{aligned}
\text{if Symmetric:}&\\
&s = 2 \max(|x_\text{min}|, x_\text{max}) /
\left( Q_\text{max} - Q_\text{min} \right) \\
&z = \begin{cases}
0 & \text{if dtype is qint8} \\
128 & \text{otherwise}
\end{cases}\\
\text{Otherwise:}&\\
&s = \left( x_\text{max} - x_\text{min} \right ) /
\left( Q_\text{max} - Q_\text{min} \right ) \\
&z = Q_\text{min} - \text{round}(x_\text{min} / s)
\end{aligned}
where :math:`Q_\text{min}` and :math:`Q_\text{max}` are the minimum and
maximum of the quantized data type.
.. warning:: :attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``.
    .. note:: If the running minimum equals the running maximum, the scale
and zero_point are set to 1.0 and 0.
"""
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(
self,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
eps=torch.finfo(torch.float32).eps,
) -> None:
# For x86 quantized kernels, we need to ensure that the vpmaddubsw
# instruction does not overflow. We allow for a reduce_range argument to
# observers that reduces the quantized range to (0,127) or (-64, 63).
# For more details see aten/src/ATen/native/quantized/cpu/qconv.cpp
# This is not an optimal choice for non x86 backends as it loses a bit
# of precision for activations.
super(MinMaxObserver, self).__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs,
eps=eps,
)
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs))
self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs))
if (
self.qscheme == torch.per_tensor_symmetric
and self.reduce_range
and self.dtype == torch.quint8
):
raise NotImplementedError(
"Cannot reduce range for symmetric \
quantization for quint8"
)
def forward(self, x_orig):
r"""Records the running minimum and maximum of ``x``."""
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach() # avoid keeping autograd tape
x = x.to(self.min_val.dtype)
min_val_cur, max_val_cur = torch.aminmax(x)
min_val = torch.min(min_val_cur, self.min_val)
max_val = torch.max(max_val_cur, self.max_val)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_orig
@torch.jit.export
def calculate_qparams(self):
r"""Calculates the quantization parameters."""
return self._calculate_qparams(self.min_val, self.max_val)
@torch.jit.export
def extra_repr(self):
return "min_val={}, max_val={}".format(self.min_val, self.max_val)
@torch.jit.export
def reset_min_max_vals(self):
"""Resets the min/max values."""
self.min_val.copy_(torch.tensor(float("inf")))
self.max_val.copy_(torch.tensor(float("-inf")))
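# Illustrative-only usage sketch (not part of the original module): with a
# running min/max of (-1.0, 1.0) and the default quint8 range (0, 255), the
# affine formulas above give scale ~= 2 / 255 and a zero_point near the middle
# of the range (127 or 128, depending on float rounding).
def _example_min_max_observer():
    obs = MinMaxObserver(dtype=torch.quint8, qscheme=torch.per_tensor_affine)
    obs(torch.tensor([-1.0, 0.0, 1.0]))  # forward() updates min_val / max_val
    scale, zero_point = obs.calculate_qparams()
    return scale, zero_point  # roughly tensor([0.0078]), tensor([127])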
class MovingAverageMinMaxObserver(MinMaxObserver):
r"""Observer module for computing the quantization parameters based on the
moving average of the min and max values.
This observer computes the quantization parameters based on the moving
averages of minimums and maximums of the incoming tensors. The module
records the average minimum and maximum of incoming tensors, and uses this
statistic to compute the quantization parameters.
Args:
averaging_constant: Averaging constant for min/max.
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.
The moving average min/max is computed as follows
.. math::
\begin{array}{ll}
x_\text{min} = \begin{cases}
\min(X) & \text{if~}x_\text{min} = \text{None} \\
(1 - c) x_\text{min} + c \min(X) & \text{otherwise}
\end{cases}\\
x_\text{max} = \begin{cases}
\max(X) & \text{if~}x_\text{max} = \text{None} \\
(1 - c) x_\text{max} + c \max(X) & \text{otherwise}
\end{cases}\\
\end{array}
    where :math:`x_\text{min/max}` is the running average min/max, :math:`X` is
    the incoming tensor, and :math:`c` is the ``averaging_constant``.
The scale and zero point are then computed as in
:class:`~torch.ao.quantization.observer.MinMaxObserver`.
.. note:: Only works with ``torch.per_tensor_affine`` quantization scheme.
    .. note:: If the running minimum equals the running maximum, the scale
and zero_point are set to 1.0 and 0.
"""
def __init__(
self,
averaging_constant=0.01,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
eps=torch.finfo(torch.float32).eps,
**kwargs
) -> None:
self.averaging_constant = averaging_constant
super(MovingAverageMinMaxObserver, self).__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
eps=eps,
**kwargs
)
def forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach() # avoid keeping autograd tape
x = x.to(self.min_val.dtype)
min_val = self.min_val
max_val = self.max_val
if min_val == float("inf") and max_val == float("-inf"):
min_val, max_val = torch.aminmax(x)
else:
min_val_cur, max_val_cur = torch.aminmax(x)
min_val = min_val + self.averaging_constant * (min_val_cur - min_val)
max_val = max_val + self.averaging_constant * (max_val_cur - max_val)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_orig
class PerChannelMinMaxObserver(UniformQuantizationObserverBase):
r"""Observer module for computing the quantization parameters based on the
running per channel min and max values.
This observer uses the tensor min/max statistics to compute the per channel
quantization parameters. The module records the running minimum and maximum
of incoming tensors, and uses this statistic to compute the quantization
parameters.
Args:
ch_axis: Channel axis
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.
The quantization parameters are computed the same way as in
:class:`~torch.ao.quantization.observer.MinMaxObserver`, with the difference
that the running min/max values are stored per channel.
Scales and zero points are thus computed per channel as well.
    .. note:: If the running minimum equals the running maximum, the scales
and zero_points are set to 1.0 and 0.
"""
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(
self,
ch_axis=0,
dtype=torch.quint8,
qscheme=torch.per_channel_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
eps=torch.finfo(torch.float32).eps,
) -> None:
super(PerChannelMinMaxObserver, self).__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs,
eps=eps,
)
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
self.ch_axis = ch_axis
self.register_buffer("min_val", torch.tensor([], **factory_kwargs))
self.register_buffer("max_val", torch.tensor([], **factory_kwargs))
if (
self.qscheme == torch.per_channel_symmetric
and self.reduce_range
and self.dtype == torch.quint8
):
raise NotImplementedError(
"Cannot reduce range for symmetric quantization for quint8"
)
def forward(self, x_orig):
return self._forward(x_orig)
def _forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach() # avoid keeping autograd tape
min_val = self.min_val
max_val = self.max_val
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x.permute(new_axis_list)
# Need to match dtype of min/max because the updates to buffers
# are done in place and types need to match for comparisons
y = y.to(self.min_val.dtype)
y = torch.flatten(y, start_dim=1)
if min_val.numel() == 0 or max_val.numel() == 0:
min_val, max_val = torch.aminmax(y, dim=1)
else:
min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
min_val = torch.min(min_val_cur, min_val)
max_val = torch.max(max_val_cur, max_val)
self.min_val.resize_(min_val.shape)
self.max_val.resize_(max_val.shape)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_orig
@torch.jit.export
def calculate_qparams(self):
return self._calculate_qparams(self.min_val, self.max_val)
def extra_repr(self):
return "min_val={}, max_val={}".format(self.min_val, self.max_val)
def _load_from_state_dict(
self,
state_dict: Dict[str, Any],
prefix: str,
local_metadata: Dict[str, torch.Tensor],
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
):
version = local_metadata.get("version", None)
if version is None or version < 3:
local_state = ["min_vals", "max_vals"]
expected_min_name = "min_vals"
expected_max_name = "max_vals"
else:
local_state = ["min_val", "max_val"]
expected_min_name = "min_val"
expected_max_name = "max_val"
for name in local_state:
key = prefix + name
if key in state_dict:
val = state_dict[key]
# Custom handling to allow loading min_val or max_val
# of size N into uninitialized buffers of size 0. The
# buffers are resized here, and the values are copied in
# the default state_dict loading code of the parent.
if name == expected_min_name:
self.min_val.resize_(val.shape)
elif name == expected_max_name:
self.max_val.resize_(val.shape)
else:
warnings.warn("Observer load_from_state_dict got unexpected name {}".format(name))
# For torchscript module we need to update the attributes here since we do not
                # call the `_load_from_state_dict` function defined in module.py
if torch.jit.is_scripting():
if name == expected_min_name:
self.min_val.copy_(val)
elif name == expected_max_name:
self.max_val.copy_(val)
else:
warnings.warn("Observer load_from_state_dict got unexpected name {}".format(name))
elif strict:
missing_keys.append(key)
if not torch.jit.is_scripting():
super(PerChannelMinMaxObserver, self)._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
def _load_from_state_dict_script(
self,
state_dict: Dict[str, Any],
prefix: str,
local_metadata: Dict[str, torch.Tensor],
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
):
self._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
@torch.jit.export
def reset_min_max_vals(self):
"""Resets the min/max values."""
self.min_val = torch.tensor([])
self.max_val = torch.tensor([])
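# Illustrative usage sketch (an assumption added for documentation, not part of the
# library API): exercising a PerChannelMinMaxObserver on a weight tensor. The tensor
# shape and the symmetric qint8 configuration below are chosen for demonstration only.
def _example_per_channel_minmax_observer():
    obs = PerChannelMinMaxObserver(ch_axis=0, dtype=torch.qint8,
                                   qscheme=torch.per_channel_symmetric)
    weight = torch.randn(8, 16)       # 8 output channels along ch_axis=0
    obs(weight)                       # records per-channel running min/max
    scale, zero_point = obs.calculate_qparams()
    return scale, zero_point          # one (scale, zero_point) entry per channel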
class MovingAveragePerChannelMinMaxObserver(PerChannelMinMaxObserver):
r"""Observer module for computing the quantization parameters based on the
running per channel min and max values.
This observer uses the tensor min/max statistics to compute the per channel
quantization parameters. The module records the running minimum and maximum
of incoming tensors, and uses this statistic to compute the quantization
parameters.
Args:
averaging_constant: Averaging constant for min/max.
ch_axis: Channel axis
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
eps: Epsilon value for float32. Defaults to `torch.finfo(torch.float32).eps`.
The quantization parameters are computed the same way as in
:class:`~torch.ao.quantization.observer.MovingAverageMinMaxObserver`, with the
difference that the running min/max values are stored per channel.
Scales and zero points are thus computed per channel as well.
.. note:: If the running minimum equals the running maximum, the scales
and zero_points are set to 1.0 and 0.
"""
def __init__(
self,
averaging_constant=0.01,
ch_axis=0,
dtype=torch.quint8,
qscheme=torch.per_channel_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
eps=torch.finfo(torch.float32).eps,
**kwargs
) -> None:
super(MovingAveragePerChannelMinMaxObserver, self).__init__(
ch_axis=ch_axis,
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
eps=eps,
**kwargs
)
self.averaging_constant = averaging_constant
def forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach() # avoid keeping autograd tape
x = x.to(self.min_val.dtype)
min_val = self.min_val
max_val = self.max_val
x_dim = x.size()
new_axis_list = [i for i in range(len(x_dim))] # noqa: C416
new_axis_list[self.ch_axis] = 0
new_axis_list[0] = self.ch_axis
y = x.permute(new_axis_list)
y = torch.flatten(y, start_dim=1)
if min_val.numel() == 0 or max_val.numel() == 0:
min_val, max_val = torch.aminmax(y, dim=1)
else:
min_val_cur, max_val_cur = torch.aminmax(y, dim=1)
min_val = min_val + self.averaging_constant * (min_val_cur - min_val)
max_val = max_val + self.averaging_constant * (max_val_cur - max_val)
self.min_val.resize_(min_val.shape)
self.max_val.resize_(max_val.shape)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
return x_orig
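# Illustrative sketch (an assumption, not library code): with the default
# averaging_constant of 0.01 the update above is an exponential moving average,
# min/max = old + 0.01 * (new - old), so each calibration batch only nudges the
# stored per-channel range.
def _example_moving_average_per_channel_observer():
    obs = MovingAveragePerChannelMinMaxObserver(averaging_constant=0.01, ch_axis=0)
    for _ in range(3):                # simulate a few calibration batches
        obs(torch.randn(4, 10))       # 4 channels along ch_axis=0
    return obs.min_val, obs.max_val   # smoothed per-channel statistics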
class HistogramObserver(UniformQuantizationObserverBase):
r"""
The module records the running histogram of tensor values along with
min/max values. ``calculate_qparams`` will calculate scale and zero_point.
Args:
bins: Number of bins to use for the histogram
upsample_rate: Factor by which the histograms are upsampled; this is
used to interpolate histograms with varying ranges across observations
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
eps: Epsilon value for float32. Defaults to `torch.finfo(torch.float32).eps`.
The scale and zero point are computed as follows:
1. Create the histogram of the incoming inputs.
The histogram is computed continuously, and the ranges per bin change
with every new tensor observed.
2. Search the distribution in the histogram for optimal min/max values.
The search for the min/max values ensures the minimization of the
quantization error with respect to the floating point model.
3. Compute the scale and zero point the same way as in the
:class:`~torch.ao.quantization.MinMaxObserver`
"""
histogram: torch.Tensor
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(
self,
bins: int = 2048,
upsample_rate: int = 128,
dtype: torch.dtype = torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
eps=torch.finfo(torch.float32).eps,
) -> None:
# bins: The number of bins used for histogram calculation.
super(HistogramObserver, self).__init__(
dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs,
eps=eps,
)
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
self.bins = bins
self.register_buffer("histogram", torch.zeros(self.bins, **factory_kwargs))
self.register_buffer("min_val", torch.tensor(float("inf"), **factory_kwargs))
self.register_buffer("max_val", torch.tensor(float("-inf"), **factory_kwargs))
self.dst_nbins = 2 ** torch.iinfo(self.dtype).bits
self.upsample_rate = upsample_rate
def _get_norm(
self, delta_begin: torch.Tensor, delta_end: torch.Tensor, density: torch.Tensor
) -> torch.Tensor:
r"""
Compute the norm of the values uniformly distributed between
delta_begin and delta_end.
Currently only L2 norm is supported.
norm = density * (integral_{begin, end} x^2)
= density * (end^3 - begin^3) / 3
"""
norm = (
delta_end * delta_end * delta_end - delta_begin * delta_begin * delta_begin
) / 3
return density * norm
def _compute_quantization_error(self, next_start_bin: int, next_end_bin: int):
r"""
Compute the quantization error if we use start_bin to end_bin as the
min and max to do the quantization.
"""
bin_width = (self.max_val.item() - self.min_val.item()) / self.bins
dst_bin_width = bin_width * (next_end_bin - next_start_bin + 1) / self.dst_nbins
if dst_bin_width == 0.0:
return 0.0
src_bin = torch.arange(self.bins, device=self.histogram.device)
# distances from the beginning of first dst_bin to the beginning and
# end of src_bin
src_bin_begin = (src_bin - next_start_bin) * bin_width
src_bin_end = src_bin_begin + bin_width
# which dst_bins the beginning and end of src_bin belong to?
dst_bin_of_begin = torch.clamp(
torch.div(src_bin_begin, dst_bin_width, rounding_mode='floor'), 0, self.dst_nbins - 1
)
dst_bin_of_begin_center = (dst_bin_of_begin + 0.5) * dst_bin_width
dst_bin_of_end = torch.clamp(
torch.div(src_bin_end, dst_bin_width, rounding_mode='floor'), 0, self.dst_nbins - 1
)
dst_bin_of_end_center = (dst_bin_of_end + 0.5) * dst_bin_width
density = self.histogram / bin_width
norm = torch.zeros(self.bins, device=self.histogram.device)
delta_begin = src_bin_begin - dst_bin_of_begin_center
delta_end = dst_bin_width / 2
norm += self._get_norm(delta_begin,
torch.ones(self.bins, device=self.histogram.device) * delta_end,
density)
norm += (dst_bin_of_end - dst_bin_of_begin - 1) * self._get_norm(
torch.tensor(-dst_bin_width / 2), torch.tensor(dst_bin_width / 2), density
)
dst_bin_of_end_center = dst_bin_of_end * dst_bin_width + dst_bin_width / 2
delta_begin = -dst_bin_width / 2
delta_end = src_bin_end - dst_bin_of_end_center
norm += self._get_norm(torch.tensor(delta_begin), delta_end, density)
return norm.sum().item()
def _non_linear_param_search(self) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Non-linear parameter search.
An approximation for L2 error minimization for selecting min/max.
By selecting new min/max, we filter out outliers in input distribution.
This follows the implementation of NormMinimization::NonlinearQuantizationParamsSearch in
caffe2/quantization/server/norm_minimization.cc
"""
assert self.histogram.size()[0] == self.bins, "bins mismatch"
bin_width = (self.max_val - self.min_val) / self.bins
# cumulative sum
total = torch.sum(self.histogram).item()
cSum = torch.cumsum(self.histogram, dim=0)
stepsize = 1e-5 # granularity
alpha = 0.0 # lower bound
beta = 1.0 # upper bound
start_bin = 0
end_bin = self.bins - 1
norm_min = float("inf")
while alpha < beta:
# Find the next step
next_alpha = alpha + stepsize
next_beta = beta - stepsize
# find the left and right bins between the quantile bounds
l = start_bin
r = end_bin
while l < end_bin and cSum[l] < next_alpha * total:
l = l + 1
while r > start_bin and cSum[r] > next_beta * total:
r = r - 1
# decide the next move
next_start_bin = start_bin
next_end_bin = end_bin
if (l - start_bin) > (end_bin - r):
# move the start bin
next_start_bin = l
alpha = next_alpha
else:
# move the end bin
next_end_bin = r
beta = next_beta
if next_start_bin == start_bin and next_end_bin == end_bin:
continue
# calculate the quantization error using next_start_bin and next_end_bin
norm = self._compute_quantization_error(next_start_bin, next_end_bin)
if norm > norm_min:
break
norm_min = norm
start_bin = next_start_bin
end_bin = next_end_bin
new_min = self.min_val + bin_width * start_bin
new_max = self.min_val + bin_width * (end_bin + 1)
return new_min, new_max
def _adjust_min_max(
self, combined_min: torch.Tensor, combined_max: torch.Tensor, upsample_rate: int
) -> Tuple[torch.Tensor, torch.Tensor, int, int]:
# We ensure that:
# (combined_max - combined_min)/(downsample_rate*Nbins) = (max - min)/(upsample_rate*Nbins)
# This allows us to have a common grid of resolution s, where we can align
# the input histogram
# start_idx maps min_val to the histogram bin index.
hist_bin_width = (self.max_val - self.min_val) / (self.bins * upsample_rate)
downsample_rate = int(
torch.ceil(
(combined_max - combined_min) / (self.bins * hist_bin_width)
).item()
)
e = downsample_rate * (self.bins * hist_bin_width) - (
combined_max - combined_min
)
# Relax only the max, not the min, so that for one sided distributions, min stays at zero
combined_max = combined_max + e
combined_min = combined_min
start_idx = int(
torch.round((self.min_val - combined_min) / hist_bin_width).item()
)
return combined_min, combined_max, downsample_rate, start_idx
def _combine_histograms(
self,
orig_hist: torch.Tensor,
new_hist: torch.Tensor,
upsample_rate: int,
downsample_rate: int,
start_idx: int,
Nbins: int,
) -> torch.Tensor:
# First up-sample the histogram with new data by a factor of L
# This creates an approximate probability density that's piecewise constant
upsampled_histogram = new_hist.repeat_interleave(upsample_rate)
# Now insert the upsampled histogram into the output
# histogram, which is initialized with zeros.
# The offset at which the histogram is introduced is determined
# by the start index as the output histogram can cover a wider range
histogram_with_output_range = torch.zeros(
(Nbins * downsample_rate), device=orig_hist.device
)
histogram_with_output_range[
start_idx : Nbins * upsample_rate + start_idx
] = upsampled_histogram
# Compute integral histogram, double precision is needed to ensure
# that there are no overflows
integral_histogram = torch.cumsum(
histogram_with_output_range, 0, dtype=torch.double
)[downsample_rate - 1 :: downsample_rate]
# Finally perform interpolation
shifted_integral_histogram = torch.zeros((Nbins), device=orig_hist.device)
shifted_integral_histogram[1:Nbins] = integral_histogram[0:-1]
interpolated_histogram = (
integral_histogram - shifted_integral_histogram
) / upsample_rate
orig_hist = orig_hist + interpolated_histogram.to(torch.float)
return orig_hist
def forward(self, x_orig: torch.Tensor) -> torch.Tensor:
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach()
min_val = self.min_val
max_val = self.max_val
same_values = min_val.item() == max_val.item()
is_uninitialized = min_val == float("inf") and max_val == float("-inf")
if is_uninitialized or same_values:
min_val, max_val = torch.aminmax(x)
self.min_val.resize_(min_val.shape)
self.min_val.copy_(min_val)
self.max_val.resize_(max_val.shape)
self.max_val.copy_(max_val)
assert (
min_val.numel() == 1 and max_val.numel() == 1
), "histogram min/max values must be scalar."
torch.histc(
x, self.bins, min=int(min_val), max=int(max_val), out=self.histogram
)
else:
new_min, new_max = torch.aminmax(x)
combined_min = torch.min(new_min, min_val)
combined_max = torch.max(new_max, max_val)
# combine the existing histogram and new histogram into 1 histogram
# We do this by first upsampling the histogram to a dense grid
# and then downsampling the histogram efficiently
(
combined_min,
combined_max,
downsample_rate,
start_idx,
) = self._adjust_min_max(combined_min, combined_max, self.upsample_rate)
assert (
combined_min.numel() == 1 and combined_max.numel() == 1
), "histogram min/max values must be scalar."
combined_histogram = torch.histc(
x, self.bins, min=int(combined_min), max=int(combined_max)
)
if combined_min == min_val and combined_max == max_val:
combined_histogram += self.histogram
else:
combined_histogram = self._combine_histograms(
combined_histogram,
self.histogram,
self.upsample_rate,
downsample_rate,
start_idx,
self.bins,
)
self.histogram.detach_().resize_(combined_histogram.shape)
self.histogram.copy_(combined_histogram)
self.min_val.detach_().resize_(combined_min.shape)
self.min_val.copy_(combined_min)
self.max_val.detach_().resize_(combined_max.shape)
self.max_val.copy_(combined_max)
return x_orig
@torch.jit.export
def calculate_qparams(self):
is_uninitialized = self.min_val == float("inf") and self.max_val == float(
"-inf"
)
if is_uninitialized:
warnings.warn(
"must run observer before calling calculate_qparams.\
Returning default scale and zero point "
)
return torch.tensor([1.0], device=self.min_val.device.type), torch.tensor([0], device=self.min_val.device.type)
assert self.bins == len(self.histogram), (
"The number of bins in histogram should be equal to the number of bins "
"supplied while making this observer"
)
new_min, new_max = self._non_linear_param_search()
return self._calculate_qparams(new_min, new_max)
def _save_to_state_dict(self, destination, prefix, keep_vars):
super(HistogramObserver, self)._save_to_state_dict(
destination, prefix, keep_vars
)
destination[prefix + "min_val"] = self.min_val
destination[prefix + "max_val"] = self.max_val
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version < 3:
# if min_val and max_val are not initialized, update their shape
# to account for the differences between v2 and v3
min_val_name, max_val_name = prefix + "min_val", prefix + "max_val"
if min_val_name in state_dict:
if state_dict[min_val_name].shape == torch.Size([0]):
state_dict[min_val_name] = torch.tensor(float("inf"))
if max_val_name in state_dict:
if state_dict[max_val_name].shape == torch.Size([0]):
state_dict[max_val_name] = torch.tensor(float("-inf"))
local_state = ["min_val", "max_val"]
for name in local_state:
key = prefix + name
if key in state_dict:
val = state_dict[key]
setattr(self, name, val)
elif strict:
missing_keys.append(key)
super(HistogramObserver, self)._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
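# Illustrative sketch (an assumption, not library code): a HistogramObserver accumulates
# a histogram across batches and then searches it for the clipping range that minimizes
# the quantization error, so calibrating on several batches before calling
# calculate_qparams generally tightens the resulting range.
def _example_histogram_observer():
    obs = HistogramObserver(bins=256)
    for _ in range(4):                # simulate calibration batches
        obs(torch.randn(32, 8))
    scale, zero_point = obs.calculate_qparams()
    return scale, zero_point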
class FixedQParamsObserver(ObserverBase):
r"""
Observer that simulates quantize and dequantize with fixed
quantization parameters in training time. Only per tensor
quantization is supported.
Args:
`scale` (float): fixed scale for the observer
`zero_point` (int): fixed zero point for the observer
`dtype`, `qscheme`, `quant_min`, `quant_max`
"""
scale: torch.Tensor
zero_point: torch.Tensor
def __init__(self,
scale,
zero_point,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
quant_min=0,
quant_max=255):
super(FixedQParamsObserver, self).__init__(dtype=dtype)
self.quant_min = quant_min
self.quant_max = quant_max
self.register_buffer('scale', torch.tensor([scale], dtype=torch.float))
self.register_buffer('zero_point', torch.tensor([zero_point], dtype=torch.int))
self.dtype = dtype
self.qscheme = qscheme
def forward(self, X):
return X
@torch.jit.export
def calculate_qparams(self):
return self.scale, self.zero_point
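# Illustrative sketch (an assumption, not library code): a FixedQParamsObserver simply
# echoes back the scale/zero_point it was constructed with, which suits ops such as
# sigmoid whose output range is known ahead of time.
def _example_fixed_qparams_observer():
    obs = FixedQParamsObserver(scale=1.0 / 256.0, zero_point=0,
                               dtype=torch.quint8, quant_min=0, quant_max=255)
    _ = obs(torch.randn(4))           # forward is a pass-through
    return obs.calculate_qparams()    # always (tensor([1/256]), tensor([0]))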
class PlaceholderObserver(ObserverBase):
r"""
Observer that doesn't do anything and just passes its configuration to the
quantized module's ``.from_float()``.
Can be used for quantization to float16 which doesn't require determining
ranges.
Args:
dtype: Quantized data type
custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation
(Can be used in Graph Mode Passes for special case ops).
"""
def __init__(
self, dtype=torch.float32, custom_op_name="", compute_dtype=None
) -> None:
super(PlaceholderObserver, self).__init__(dtype=dtype)
# dtype of input of the target operator, e.g. for dynamic quantization
# ops, the dtype will be float32
self.dtype = dtype
self.custom_op = custom_op_name
# used for configuration of computation type for dynamic quantization
if compute_dtype:
self.compute_dtype = compute_dtype
def forward(self, x):
return x
@torch.jit.export
def calculate_qparams(self):
raise Exception(
"calculate_qparams should not be called for PlaceholderObserver"
)
class RecordingObserver(ObserverBase):
r"""
The module is mainly for debugging and records the tensor values during runtime.
Args:
dtype: Quantized data type
qscheme: Quantization scheme to be used
reduce_range: Reduces the range of the quantized data type by 1 bit
"""
__annotations__ = {"tensor_val": List[Optional[torch.Tensor]]}
def __init__(self, dtype=torch.quint8, **kwargs):
super(RecordingObserver, self).__init__(dtype=dtype, **kwargs) # type: ignore[call-arg]
self.tensor_val = []
def forward(self, x):
self.tensor_val.append(x.clone())
return x
@torch.jit.export
def calculate_qparams(self):
raise Exception("calculate_qparams should not be called for RecordingObserver")
@torch.jit.export
def get_tensor_value(self):
return self.tensor_val
class NoopObserver(ObserverBase):
r"""
Observer that doesn't do anything and just passes its configuration to the
quantized module's ``.from_float()``.
Primarily used for quantization to float16 which doesn't require determining
ranges.
Args:
dtype: Quantized data type
custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation
(Can be used in Graph Mode Passes for special case ops).
"""
def __init__(self, dtype=torch.float16, custom_op_name="") -> None:
super(NoopObserver, self).__init__(dtype=dtype)
self.dtype = dtype
self.custom_op = custom_op_name
def forward(self, x):
return x
@torch.jit.export
def calculate_qparams(self):
raise Exception("calculate_qparams should not be called for NoopObserver")
class ReuseInputObserver(ObserverBase):
r""" This observer is used when we want to reuse the observer from the operator
that produces the input Tensor, typically used for operators like reshape, e.g.
```
x0 = ...
x1 = x0.reshape()
```
if we configure x0 to be observed by some observer, let's say MinMaxObserver,
and reshape is configured with ReuseInputObserver, we'll reuse the observer instance
for x0 for x1 (output of reshape). If x0 is not observed, we also won't observe x1.
Note: this is only enabled in FX Graph Mode Quantization
"""
def __init__(self):
super().__init__(torch.quint8)
def forward(self, x):
return x
@torch.jit.export
def calculate_qparams(self):
raise Exception("calculate_qparams should not be called for ReuseInputObserver")
def _is_observer_script_module(mod, obs_type_name):
"""Returns true if given mod is an instance of Observer script module."""
if isinstance(mod, torch.jit.RecursiveScriptModule):
# qualified name looks like '__torch__.torch.ao.quantization.observer.___torch_mangle_2.MinMaxObserver'
suffix = mod._c.qualified_name.split(".", 1)[1]
name = re.sub(r"\.___torch_mangle_\d+", "", suffix)
return obs_type_name in name
return False
def _is_activation_post_process(module):
return (
isinstance(module, torch.ao.quantization.ObserverBase)
or isinstance(module, torch.ao.quantization.FakeQuantize)
or _is_observer_script_module(module, "quantization.observer")
)
def _is_per_channel_script_obs_instance(module):
if isinstance(module, torch.jit.RecursiveScriptModule):
return _is_observer_script_module(
module, "quantization.observer.PerChannelMinMaxObserver"
) or _is_observer_script_module(
module, "quantization.observer.MovingAveragePerChannelMinMaxObserver"
)
return False
def get_observer_state_dict(mod):
r"""
Returns the state dict corresponding to the observer stats.
Traverse the model state_dict and extract out the stats.
"""
od = OrderedDict()
if isinstance(mod, torch.jit.RecursiveScriptModule):
for k, v in mod.state_dict().items():
if "observer" in k:
od[k] = v
else:
# path for GraphModule and nn.Module (eager mode)
for k, v in mod.state_dict().items():
if "activation_post_process" in k:
od[k] = v
od._metadata = mod.state_dict()._metadata # type: ignore[attr-defined]
return od
def load_observer_state_dict(mod, obs_dict):
r"""
Given input model and a state_dict containing model observer stats,
load the stats back into the model. The observer state_dict can be saved
using torch.ao.quantization.get_observer_state_dict
"""
missing_keys: List[str] = []
unexpected_keys: List[str] = []
for name, module in mod.named_modules():
prefix = name + "."
if _is_activation_post_process(module):
if _is_per_channel_script_obs_instance(module):
# For per-channel observers we need to call a custom load_from_state_dict to resize the tensor.
# However this is not called when the module is scripted and we end up calling the default one in module.py
module._load_from_state_dict_script(
obs_dict, prefix, {}, True, missing_keys, unexpected_keys, []
)
else:
module._load_from_state_dict(
obs_dict, prefix, {}, False, missing_keys, unexpected_keys, []
)
for k in missing_keys:
if "observer" in k or "activation_post_process" in k:
raise Exception("Missing keys for observer {} in state_dict".format(k))
for k in unexpected_keys:
if "observer" in k or "activation_post_process" in k:
raise Exception("Unexpected keys for observer {} in state_dict".format(k))
# Restrict activations to be in the range (0,127)
default_observer = MinMaxObserver.with_args(quant_min=0, quant_max=127)
"""
Default observer for static quantization, usually used for debugging.
"""
default_placeholder_observer = PlaceholderObserver
"""
Default placeholder observer, usually used for quantization to torch.float16.
"""
default_debug_observer = RecordingObserver
"""
Default debug-only observer.
"""
default_weight_observer = MinMaxObserver.with_args(
dtype=torch.qint8, qscheme=torch.per_tensor_symmetric
)
"""
Default weight observer.
"""
weight_observer_range_neg_127_to_127 = MinMaxObserver.with_args(
dtype=torch.qint8, qscheme=torch.per_tensor_symmetric,
quant_min=-127, quant_max=127, eps=2 ** -12)
"""
Symmetric weight observer with the 8-bit values restricted to [-127, +127], excluding -128.
"""
default_histogram_observer = HistogramObserver.with_args(quant_min=0, quant_max=127)
"""
Default histogram observer, usually used for PTQ.
"""
default_per_channel_weight_observer = PerChannelMinMaxObserver.with_args(
dtype=torch.qint8, qscheme=torch.per_channel_symmetric
)
"""
Default per-channel weight observer, usually used on backends where per-channel
weight quantization is supported, such as `fbgemm`.
"""
per_channel_weight_observer_range_neg_127_to_127 = PerChannelMinMaxObserver.with_args(
dtype=torch.qint8, qscheme=torch.per_channel_symmetric,
quant_min=-127, quant_max=127, eps=2 ** -12)
"""
Per-channel, symmetric weight observer with the 8-bit values restricted to [-127, +127], excluding -128.
"""
default_dynamic_quant_observer = PlaceholderObserver.with_args(
dtype=torch.float, compute_dtype=torch.quint8
)
"""
Default observer for dynamic quantization.
"""
default_float_qparams_observer = PerChannelMinMaxObserver.with_args(
dtype=torch.quint8, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0
)
"""
Default observer for a floating point zero-point.
"""
default_float_qparams_observer_4bit = PerChannelMinMaxObserver.with_args(
dtype=torch.quint4x2, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0
)
"""
Default observer for a floating point zero-point and 4 bit activations.
"""
# TODO(future PR): remove these defaults and enforce activation functions
# to explicitly specify their output range
default_fixed_qparams_range_neg1to1_observer = FixedQParamsObserver.with_args(
scale=2.0 / 256.0, zero_point=128, dtype=torch.quint8, quant_min=0, quant_max=255)
default_fixed_qparams_range_0to1_observer = FixedQParamsObserver.with_args(
scale=1.0 / 256.0, zero_point=0, dtype=torch.quint8, quant_min=0, quant_max=255)
# TODO: the following 2 variables are kept for backwards compatibility; remove after a few releases
default_symmetric_fixed_qparams_observer = default_fixed_qparams_range_neg1to1_observer
default_affine_fixed_qparams_observer = default_fixed_qparams_range_0to1_observer
"""
Default observers for fixed qparams operations.
"""
default_reuse_input_observer = ReuseInputObserver
"""
Default observer for operators like reshape that reuses the observer of input to
the operator
"""
|
pytorch-master
|
torch/ao/quantization/observer.py
|
import copy
import torch.nn as nn
from torch.ao.quantization.fuser_method_mappings import get_fuser_method
# for backward compatibility
from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn # noqa: F401
from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn_relu # noqa: F401
from torch.nn.utils.parametrize import type_before_parametrizations
from typing import List, Optional
# Generalization of getattr
def _get_module(model, submodule_key):
tokens = submodule_key.split('.')
cur_mod = model
for s in tokens:
cur_mod = getattr(cur_mod, s)
return cur_mod
# Generalization of setattr
def _set_module(model, submodule_key, module):
tokens = submodule_key.split('.')
sub_tokens = tokens[:-1]
cur_mod = model
for s in sub_tokens:
cur_mod = getattr(cur_mod, s)
setattr(cur_mod, tokens[-1], module)
def fuse_known_modules(mod_list, is_qat, additional_fuser_method_mapping=None):
r"""Returns a list of modules that fuses the operations specified
in the input module list.
Fuses only the following sequence of modules:
conv, bn
conv, bn, relu
conv, relu
linear, bn
linear, relu
For these sequences, the first element in the output module list performs
the fused operation. The rest of the elements are set to nn.Identity()
"""
types = tuple(type_before_parametrizations(m) for m in mod_list)
fuser_method = get_fuser_method(types, additional_fuser_method_mapping)
if fuser_method is None:
raise NotImplementedError("Cannot fuse modules: {}".format(types))
new_mod : List[Optional[nn.Module]] = [None] * len(mod_list)
fused = fuser_method(is_qat, *mod_list)
# NOTE: forward hooks not processed in the two following for loops will be lost after the fusion
# Move pre forward hooks of the base module to resulting fused module
for handle_id, pre_hook_fn in mod_list[0]._forward_pre_hooks.items():
fused.register_forward_pre_hook(pre_hook_fn)
del mod_list[0]._forward_pre_hooks[handle_id]
# Move post forward hooks of the last module to resulting fused module
for handle_id, hook_fn in mod_list[-1]._forward_hooks.items():
fused.register_forward_hook(hook_fn)
del mod_list[-1]._forward_hooks[handle_id]
new_mod[0] = fused
for i in range(1, len(mod_list)):
identity = nn.Identity()
identity.training = mod_list[0].training
new_mod[i] = identity
return new_mod
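# Illustrative sketch (an assumption, not library code): fusing an eval-mode
# Conv2d + BatchNorm2d + ReLU triple. The first returned element is the fused module;
# the remaining slots are nn.Identity placeholders.
def _example_fuse_known_modules():
    mods = [nn.Conv2d(3, 8, 3).eval(), nn.BatchNorm2d(8).eval(), nn.ReLU().eval()]
    fused = fuse_known_modules(mods, is_qat=False)
    return fused                      # [fused ConvReLU module, nn.Identity(), nn.Identity()]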
def _fuse_modules_helper(model, modules_to_fuse, is_qat, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
if fuse_custom_config_dict is None:
fuse_custom_config_dict = {}
additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
mod_list = []
for item in modules_to_fuse:
mod_list.append(_get_module(model, item))
# Fuse list of modules
new_mod_list = fuser_func(mod_list, is_qat, additional_fuser_method_mapping)
# Replace original module list with fused module list
for i, item in enumerate(modules_to_fuse):
_set_module(model, item, new_mod_list[i])
def _fuse_modules(model, modules_to_fuse, is_qat, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
if not inplace:
model = copy.deepcopy(model)
if all(isinstance(module_element, str) for module_element in modules_to_fuse):
# Handle case of modules_to_fuse being a list
_fuse_modules_helper(model, modules_to_fuse, is_qat, fuser_func, fuse_custom_config_dict)
else:
# Handle case of modules_to_fuse being a list of lists
for module_list in modules_to_fuse:
_fuse_modules_helper(model, module_list, is_qat, fuser_func, fuse_custom_config_dict)
return model
def fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
r"""Fuses a list of modules into a single module
Fuses only the following sequence of modules:
conv, bn
conv, bn, relu
conv, relu
linear, relu
bn, relu
All other sequences are left unchanged.
For these sequences, replaces the first item in the list
with the fused module, replacing the rest of the modules
with identity.
Args:
model: Model containing the modules to be fused
modules_to_fuse: list of list of module names to fuse. Can also be a list
of strings if there is only a single list of modules to fuse.
inplace: bool specifying if fusion happens in place on the model, by default
a new model is returned
fuser_func: Function that takes in a list of modules and outputs a list of fused modules
of the same length. For example,
fuser_func([convModule, BNModule]) returns the list [ConvBNModule, nn.Identity()]
Defaults to torch.ao.quantization.fuse_known_modules
`fuse_custom_config_dict`: custom configuration for fusion
.. code-block:: python
# Example of fuse_custom_config_dict
fuse_custom_config_dict = {
# Additional fuser_method mapping
"additional_fuser_method_mapping": {
(torch.nn.Conv2d, torch.nn.BatchNorm2d): fuse_conv_bn
},
}
Returns:
model with fused modules. A new copy is created if inplace=False.
Examples::
>>> # xdoctest: +SKIP
>>> m = M().eval()
>>> # m is a module containing the sub-modules below
>>> modules_to_fuse = [ ['conv1', 'bn1', 'relu1'], ['submodule.conv', 'submodule.relu']]
>>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)
>>> output = fused_m(input)
>>> m = M().eval()
>>> # Alternately provide a single list of modules to fuse
>>> modules_to_fuse = ['conv1', 'bn1', 'relu1']
>>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)
>>> output = fused_m(input)
"""
return _fuse_modules(
model,
modules_to_fuse,
is_qat=False,
inplace=inplace,
fuser_func=fuse_known_modules,
fuse_custom_config_dict=None)
def fuse_modules_qat(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):
""" QAT version for `fuse_modules`
"""
return _fuse_modules(
model,
modules_to_fuse,
is_qat=True,
inplace=inplace,
fuser_func=fuse_known_modules,
fuse_custom_config_dict=None)
|
pytorch-master
|
torch/ao/quantization/fuse_modules.py
|
import torch
from torch.nn.parameter import Parameter
class _LearnableFakeQuantize(torch.ao.quantization.FakeQuantizeBase):
r""" This is an extension of the FakeQuantize module in fake_quantize.py, which
supports more generalized lower-bit quantization and supports learning of the scale
and zero point parameters through backpropagation. For literature references,
please see the class _LearnableFakeQuantizePerTensorOp.
In addition to the attributes in the original FakeQuantize module, the _LearnableFakeQuantize
module also includes the following attributes to support quantization parameter learning.
* :attr:`channel_len` defines the length of the channel when initializing scale and zero point
for the per channel case.
* :attr:`use_grad_scaling` defines the flag for whether the gradients for scale and zero point are
normalized by the constant, which is proportional to the square root of the number of
elements in the tensor. The related literature justifying the use of this particular constant
can be found here: https://openreview.net/pdf?id=rkgO66VKDS.
* :attr:`fake_quant_enabled` defines the flag for enabling fake quantization on the output.
* :attr:`static_enabled` defines the flag for using observer's static estimation for
scale and zero point.
* :attr:`learning_enabled` defines the flag for enabling backpropagation for scale and zero point.
"""
def __init__(self, observer, quant_min=0, quant_max=255, scale=1., zero_point=0., channel_len=-1,
use_grad_scaling=False, **observer_kwargs):
super(_LearnableFakeQuantize, self).__init__()
assert quant_min < quant_max, 'quant_min must be strictly less than quant_max.'
self.quant_min = quant_min
self.quant_max = quant_max
# also pass quant_min and quant_max to observer
observer_kwargs["quant_min"] = quant_min
observer_kwargs["quant_max"] = quant_max
self.use_grad_scaling = use_grad_scaling
if channel_len == -1:
self.scale = Parameter(torch.tensor([scale]))
self.zero_point = Parameter(torch.tensor([zero_point]))
else:
assert isinstance(channel_len, int) and channel_len > 0, "Channel size must be a positive integer."
self.scale = Parameter(torch.tensor([scale] * channel_len))
self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))
self.activation_post_process = observer(**observer_kwargs)
assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, \
'quant_min out of bound'
assert quant_max <= torch.iinfo(self.activation_post_process.dtype).max, \
'quant_max out of bound'
self.dtype = self.activation_post_process.dtype
self.qscheme = self.activation_post_process.qscheme
self.ch_axis = self.activation_post_process.ch_axis \
if hasattr(self.activation_post_process, 'ch_axis') else -1
self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('static_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('learning_enabled', torch.tensor([0], dtype=torch.uint8))
bitrange = torch.tensor(quant_max - quant_min + 1).double()
self.bitwidth = int(torch.log2(bitrange).item())
self.register_buffer('eps', torch.tensor([torch.finfo(torch.float32).eps]))
@torch.jit.export
def enable_param_learning(self):
r"""Enables learning of quantization parameters and
disables static observer estimates. Forward path returns fake quantized X.
"""
self.toggle_qparam_learning(enabled=True) \
.toggle_fake_quant(enabled=True) \
.toggle_observer_update(enabled=False)
return self
@torch.jit.export
def enable_static_estimate(self):
r"""Enables static observer estimates and disbales learning of
quantization parameters. Forward path returns fake quantized X.
"""
self.toggle_qparam_learning(enabled=False) \
.toggle_fake_quant(enabled=True) \
.toggle_observer_update(enabled=True)
@torch.jit.export
def enable_static_observation(self):
r"""Enables static observer accumulating data from input but doesn't
update the quantization parameters. Forward path returns the original X.
"""
self.toggle_qparam_learning(enabled=False) \
.toggle_fake_quant(enabled=False) \
.toggle_observer_update(enabled=True)
@torch.jit.export
def toggle_observer_update(self, enabled=True):
self.static_enabled[0] = int(enabled) # type: ignore[operator]
return self
@torch.jit.export
def enable_observer(self, enabled=True):
self.toggle_observer_update(enabled)
@torch.jit.export
def toggle_qparam_learning(self, enabled=True):
self.learning_enabled[0] = int(enabled) # type: ignore[operator]
self.scale.requires_grad = enabled
self.zero_point.requires_grad = enabled
return self
@torch.jit.export
def toggle_fake_quant(self, enabled=True):
self.fake_quant_enabled[0] = int(enabled)
return self
@torch.jit.export
def observe_quant_params(self):
print('_LearnableFakeQuantize Scale: {}'.format(self.scale.detach()))
print('_LearnableFakeQuantize Zero Point: {}'.format(self.zero_point.detach()))
@torch.jit.export
def calculate_qparams(self):
self.scale.data.clamp_(min=self.eps.item()) # type: ignore[operator]
scale = self.scale.detach()
zero_point = self.zero_point.detach().round().clamp(self.quant_min, self.quant_max).long()
return scale, zero_point
def forward(self, X):
if self.static_enabled[0] == 1: # type: ignore[index]
self.activation_post_process(X.detach())
_scale, _zero_point = self.activation_post_process.calculate_qparams()
_scale = _scale.to(self.scale.device)
_zero_point = _zero_point.to(self.zero_point.device)
self.scale.data.copy_(_scale)
self.zero_point.data.copy_(_zero_point)
else:
self.scale.data.clamp_(min=self.eps.item()) # type: ignore[operator]
if self.fake_quant_enabled[0] == 1:
if self.qscheme in (torch.per_channel_symmetric, torch.per_tensor_symmetric):
self.zero_point.data.zero_()
if self.use_grad_scaling:
grad_factor = 1.0 / (X.numel() * self.quant_max) ** 0.5
else:
grad_factor = 1.0
if self.qscheme in (
torch.per_channel_symmetric, torch.per_channel_affine):
X = torch._fake_quantize_learnable_per_channel_affine(
X, self.scale, self.zero_point, self.ch_axis,
self.quant_min, self.quant_max, grad_factor)
else:
X = torch._fake_quantize_learnable_per_tensor_affine(
X, self.scale, self.zero_point,
self.quant_min, self.quant_max, grad_factor)
return X
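# Illustrative sketch (assumptions: MovingAverageMinMaxObserver as the wrapped observer
# and the input shape are chosen for demonstration only): after enable_param_learning(),
# scale and zero_point are trainable Parameters, so gradients reach them through the
# learnable fake-quantize ops used in forward().
def _example_learnable_fake_quantize():
    from torch.ao.quantization.observer import MovingAverageMinMaxObserver
    fq = _LearnableFakeQuantize(MovingAverageMinMaxObserver, quant_min=0, quant_max=255)
    fq.enable_param_learning()
    x = torch.randn(4, 8, requires_grad=True)
    y = fq(x)                         # fake-quantized with learnable qparams
    y.sum().backward()                # populates fq.scale.grad and fq.zero_point.grad
    return fq.scale.grad, fq.zero_point.grad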
|
pytorch-master
|
torch/ao/quantization/_learnable_fake_quantize.py
|
import copy
import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.quantized as nniq
import torch.nn.intrinsic.quantized.dynamic as nniqd
import torch.nn.intrinsic.qat as nniqat
import torch.nn.quantized as nnq
import torch.nn.quantized._reference as nnqr
import torch.nn.quantized.dynamic as nnqd
import torch.nn.qat as nnqat
import torch.nn.qat.dynamic as nnqatd
from typing import Optional, Union, Dict, Set, Callable, Any
import torch.ao.nn as ao_nn
from torch.ao.quantization.stubs import QuantStub, DeQuantStub
from torch.ao.quantization.fake_quantize import (
default_fixed_qparams_range_0to1_fake_quant,
default_fixed_qparams_range_neg1to1_fake_quant,
)
from torch.ao.quantization.utils import get_combined_dict
from torch.nn.utils.parametrize import type_before_parametrizations
# Default map for swapping float module to reference quantized modules
DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = {
QuantStub: nnq.Quantize,
DeQuantStub: nnq.DeQuantize,
nn.Linear: nnqr.Linear,
nn.Conv1d: nnqr.Conv1d,
nn.Conv2d: nnqr.Conv2d,
nn.Conv3d: nnqr.Conv3d,
nn.ConvTranspose1d: nnqr.ConvTranspose1d,
nn.ConvTranspose2d: nnqr.ConvTranspose2d,
nn.ConvTranspose3d: nnqr.ConvTranspose3d,
nn.Embedding: nnqr.Embedding,
nn.EmbeddingBag: nnqr.EmbeddingBag,
nn.GRUCell: nnqr.GRUCell,
nn.LSTMCell: nnqr.LSTMCell,
nn.RNNCell: nnqr.RNNCell,
nn.LSTM: nnqr.LSTM,
}
# Default map for swapping float module to quantized ones
DEFAULT_STATIC_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = {
QuantStub: nnq.Quantize,
DeQuantStub: nnq.DeQuantize,
nn.BatchNorm2d: nnq.BatchNorm2d,
nn.BatchNorm3d: nnq.BatchNorm3d,
nn.Dropout: nnq.Dropout,
nn.Conv1d: nnq.Conv1d,
nn.Conv2d: nnq.Conv2d,
nn.Conv3d: nnq.Conv3d,
nn.ConvTranspose1d: nnq.ConvTranspose1d,
nn.ConvTranspose2d: nnq.ConvTranspose2d,
nn.ConvTranspose3d: nnq.ConvTranspose3d,
nn.ELU: nnq.ELU,
nn.Embedding: nnq.Embedding,
nn.EmbeddingBag: nnq.EmbeddingBag,
nn.GroupNorm: nnq.GroupNorm,
nn.Hardswish: nnq.Hardswish,
nn.InstanceNorm1d: nnq.InstanceNorm1d,
nn.InstanceNorm2d: nnq.InstanceNorm2d,
nn.InstanceNorm3d: nnq.InstanceNorm3d,
nn.LayerNorm: nnq.LayerNorm,
nn.LeakyReLU: nnq.LeakyReLU,
nn.modules.linear.NonDynamicallyQuantizableLinear: nnq.Linear,
nn.Linear: nnq.Linear,
nn.ReLU6: nnq.ReLU6,
nn.Dropout: nnq.Dropout,
nn.PReLU: nnq.PReLU,
# Wrapper Modules:
nnq.FloatFunctional: nnq.QFunctional,
# Intrinsic modules:
nni.BNReLU2d: nniq.BNReLU2d,
nni.BNReLU3d: nniq.BNReLU3d,
nni.ConvReLU1d: nniq.ConvReLU1d,
nni.ConvReLU2d: nniq.ConvReLU2d,
nni.ConvReLU3d: nniq.ConvReLU3d,
nni.LinearReLU: nniq.LinearReLU,
nniqat.ConvBn1d: nnq.Conv1d,
nniqat.ConvBn2d: nnq.Conv2d,
nniqat.ConvBn3d: nnq.Conv3d,
nniqat.ConvBnReLU1d: nniq.ConvReLU1d,
nniqat.ConvBnReLU2d: nniq.ConvReLU2d,
nniqat.ConvBnReLU3d: nniq.ConvReLU3d,
nniqat.ConvReLU2d: nniq.ConvReLU2d,
nniqat.ConvReLU3d: nniq.ConvReLU3d,
nniqat.LinearReLU: nniq.LinearReLU,
nniqat.LinearBn1d: nnq.Linear,
# QAT modules:
nnqat.Linear: nnq.Linear,
nnqat.Conv2d: nnq.Conv2d,
nnqat.Conv3d: nnq.Conv3d,
}
# Default map for swapping float module to qat modules
DEFAULT_QAT_MODULE_MAPPINGS : Dict[Callable, Any] = {
nn.Conv2d: nnqat.Conv2d,
nn.Conv3d: nnqat.Conv3d,
nn.Linear: nnqat.Linear,
nn.modules.linear.NonDynamicallyQuantizableLinear: nnqat.Linear,
# Intrinsic modules:
nni.ConvBn1d: nniqat.ConvBn1d,
nni.ConvBn2d: nniqat.ConvBn2d,
nni.ConvBn3d: nniqat.ConvBn3d,
nni.ConvBnReLU1d: nniqat.ConvBnReLU1d,
nni.ConvBnReLU2d: nniqat.ConvBnReLU2d,
nni.ConvBnReLU3d: nniqat.ConvBnReLU3d,
nni.ConvReLU2d: nniqat.ConvReLU2d,
nni.ConvReLU3d: nniqat.ConvReLU3d,
nni.LinearReLU: nniqat.LinearReLU,
nni.LinearBn1d: nniqat.LinearBn1d,
}
# Default map for swapping dynamic modules
DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = {
nn.GRUCell: nnqd.GRUCell,
nn.Linear: nnqd.Linear,
nnqatd.Linear: nnqd.Linear,
nn.modules.linear.NonDynamicallyQuantizableLinear: nnqd.Linear,
nn.LSTM: nnqd.LSTM,
nn.GRU: nnqd.GRU,
nn.LSTMCell: nnqd.LSTMCell,
nn.RNNCell: nnqd.RNNCell,
nni.LinearReLU: nniqd.LinearReLU,
nn.EmbeddingBag: nnq.EmbeddingBag,
nn.Embedding: nnq.Embedding,
# Don't want to enable these by default because the numerical
# accuracy is poor compared to other dynamic ops
# nn.Conv1d: nnqd.Conv1d,
# nn.Conv2d: nnqd.Conv2d,
# nn.Conv3d: nnqd.Conv3d,
# nn.ConvTranspose1d: nnqd.ConvTranspose1d,
# nn.ConvTranspose2d: nnqd.ConvTranspose2d,
# nn.ConvTranspose3d: nnqd.ConvTranspose3d,
}
# Allowlist for propagating the qconfig
_INCLUDE_QCONFIG_PROPAGATE_LIST : Set[Callable] = {
nn.Sequential,
}
# Default mapping from floating point function or torch ops to quantized ops
# TODO: merge with default static mapping
DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS : Dict[Union[Callable, str], Callable] = {
F.elu: torch.ops.quantized.elu,
F.hardswish: torch.ops.quantized.hardswish,
F.instance_norm: torch.ops.quantized.instance_norm,
F.layer_norm: torch.ops.quantized.layer_norm,
F.leaky_relu: torch.ops.quantized.leaky_relu,
F.dropout: torch.ops.quantized.dropout,
}
# mapping from module to output activation post process class
DEFAULT_MODULE_TO_ACT_POST_PROCESS : Dict[Callable, Callable] = {
nn.Hardsigmoid: default_fixed_qparams_range_0to1_fake_quant,
nn.Sigmoid: default_fixed_qparams_range_0to1_fake_quant,
nn.Softmax: default_fixed_qparams_range_0to1_fake_quant,
nn.Tanh: default_fixed_qparams_range_neg1to1_fake_quant,
}
# Default map for swapping float module to static sparse quantized ones
DEFAULT_STATIC_SPARSE_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = {
nn.Linear: ao_nn.sparse.quantized.Linear
}
# Default map for swapping float module to dynamic sparse quantized ones
DEFAULT_DYNAMIC_SPARSE_QUANT_MODULE_MAPPINGS : Dict[Callable, Any] = {
nn.Linear: ao_nn.sparse.quantized.dynamic.Linear
}
def no_observer_set() -> Set[Any]:
r"""These modules cannot have observers inserted by default."""
no_observers = set([
nn.quantizable.LSTM,
nn.quantizable.MultiheadAttention
])
return no_observers
def get_default_static_quant_module_mappings() -> Dict[Callable, Any]:
''' Get module mapping for post training static quantization
'''
return copy.deepcopy(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS)
def get_default_static_quant_reference_module_mappings() -> Dict[Callable, Any]:
''' Get reference module mapping for post training static quantization
'''
return copy.deepcopy(DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS)
def get_embedding_static_quant_module_mappings() -> Dict[Callable, Any]:
''' Get module mapping, including mapping for embedding QAT
'''
mapping = copy.deepcopy(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS)
mapping[nnqat.EmbeddingBag] = nnq.EmbeddingBag
mapping[nnqat.Embedding] = nnq.Embedding
return mapping
def get_default_static_sparse_quant_module_mappings() -> Dict[Callable, Any]:
''' Get module mapping for post training static sparse quantization
'''
return copy.deepcopy(DEFAULT_STATIC_SPARSE_QUANT_MODULE_MAPPINGS)
def get_static_quant_module_class(
float_module_class: Callable,
additional_static_quant_mapping: Optional[Dict[Callable, Any]] = None,
is_reference: bool = False) -> Any:
r"""n Get the statically quantized module class corresponding to
the floating point module class
"""
if additional_static_quant_mapping is None:
additional_static_quant_mapping = {}
all_mappings = get_combined_dict(
DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS if is_reference
else DEFAULT_STATIC_QUANT_MODULE_MAPPINGS, additional_static_quant_mapping)
static_quant_module_class = all_mappings.get(float_module_class, None)
assert static_quant_module_class is not None, \
"Floating point module class {}".format(str(float_module_class)) + \
" does not have a corresponding quantized module class"
return copy.deepcopy(static_quant_module_class)
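# Illustrative sketch (an assumption, not library code): resolving the quantized
# counterpart of a float module class through the default static mappings.
def _example_static_quant_module_class():
    return get_static_quant_module_class(nn.Conv2d)  # -> torch.nn.quantized.Conv2d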
def get_dynamic_quant_module_class(
float_module_class: Callable,
additional_dynamic_quant_mapping: Optional[Dict[Callable, Any]] = None) -> Any:
r"""n Get the dynamically quantized module class corresponding to
the floating point module class
"""
if additional_dynamic_quant_mapping is None:
additional_dynamic_quant_mapping = {}
all_mappings = get_combined_dict(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS, additional_dynamic_quant_mapping)
dynamic_quant_module_class = all_mappings.get(float_module_class, None)
assert dynamic_quant_module_class is not None, \
"Floating point module class {}".format(str(float_module_class)) + \
" does not have a corresponding quantized module class"
return copy.deepcopy(dynamic_quant_module_class)
def get_default_qat_module_mappings() -> Dict[Callable, Any]:
''' Get default module mapping for quantization aware training
'''
return copy.deepcopy(DEFAULT_QAT_MODULE_MAPPINGS)
def get_embedding_qat_module_mappings() -> Dict[Callable, Any]:
''' Get module mapping for quantization aware training
This includes default values in addition to
enabling qat for embeddings.
'''
mapping = copy.deepcopy(DEFAULT_QAT_MODULE_MAPPINGS)
mapping[nn.EmbeddingBag] = nnqat.EmbeddingBag
mapping[nn.Embedding] = nnqat.Embedding
return mapping
def get_default_dynamic_quant_module_mappings() -> Dict[Callable, Any]:
''' Get module mapping for post training dynamic quantization
'''
return DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS
def get_default_dynamic_sparse_quant_module_mappings() -> Dict[Callable, Any]:
''' Get module mapping for post training dynamic sparse quantization
'''
return DEFAULT_DYNAMIC_SPARSE_QUANT_MODULE_MAPPINGS
def get_default_qconfig_propagation_list() -> Set[Callable]:
''' Get the default list of module types that we'll attach the qconfig
attribute to in prepare
'''
QCONFIG_PROPAGATE_MODULE_CLASS_LIST = (
(set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.keys()) |
set(DEFAULT_QAT_MODULE_MAPPINGS.keys()) |
set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.keys()) |
_INCLUDE_QCONFIG_PROPAGATE_LIST)
)
return copy.deepcopy(QCONFIG_PROPAGATE_MODULE_CLASS_LIST)
def get_default_compare_output_module_list() -> Set[Callable]:
''' Get the list of module class types whose output we will record
in the numeric suite
'''
NUMERIC_SUITE_COMPARE_MODEL_OUTPUT_MODULE_LIST = (
set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.values())
| set(DEFAULT_QAT_MODULE_MAPPINGS.values())
| set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.values())
| set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.keys())
| set(DEFAULT_QAT_MODULE_MAPPINGS.keys())
| set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.keys())
| _INCLUDE_QCONFIG_PROPAGATE_LIST
)
return copy.deepcopy(NUMERIC_SUITE_COMPARE_MODEL_OUTPUT_MODULE_LIST)
def get_default_float_to_quantized_operator_mappings(
) -> Dict[Union[Callable, str], Callable]:
return copy.deepcopy(DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS)
# TODO: merge with get_static_quant_module_class
def get_quantized_operator(float_op: Union[Callable, str]) -> Callable:
''' Get the quantized operator corresponding to the float operator
'''
quantized_op = DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS.get(float_op, None)
assert quantized_op is not None, \
'Operator {} does not have corresponding quantized op'.format(str(float_op))
return quantized_op
def _get_special_act_post_process(module: torch.nn.Module) -> Optional[Callable]:
r""" Get the special activation post process for `module`, this has
higher priority than the activation post process in `qconfig`
e.g.
input: torch.nn.Sigmoid
output: default_fixed_qparams_range_0to1_fake_quant
"""
return DEFAULT_MODULE_TO_ACT_POST_PROCESS.get(type_before_parametrizations(module), None)
def _has_special_act_post_process(module: torch.nn.Module) -> bool:
return module.training and type(module) in DEFAULT_MODULE_TO_ACT_POST_PROCESS
|
pytorch-master
|
torch/ao/quantization/quantization_mappings.py
|
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.ao.quantization
import torch.ao.ns._numeric_suite as ns
_supported_modules = {nn.Linear, nn.Conv2d}
_supported_modules_quantized = {nnq.Linear, nnq.Conv2d}
def get_module(model, name):
''' Given name of submodule, this function grabs the submodule from given model
'''
return dict(model.named_modules())[name]
def parent_child_names(name):
'''Splits full name of submodule into parent submodule's full name and submodule's name
'''
split_name = name.rsplit('.', 1)
if len(split_name) == 1:
return '', split_name[0]
else:
return split_name[0], split_name[1]
def get_param(module, attr):
''' Sometimes the weight/bias attribute gives you the raw tensor directly, and sometimes
it gives a function that returns the raw tensor; this function takes care of both cases
'''
param = getattr(module, attr, None)
if callable(param):
return param()
else:
return param
class MeanShadowLogger(ns.Logger):
r"""A logger for a Shadow module whose purpose is to record the rolling mean
of the data passed to the floating point and quantized models
"""
def __init__(self):
super(MeanShadowLogger, self).__init__()
self.stats["float"] = None
self.stats["quantized"] = None
self.count = 0
self.float_sum = None
self.quant_sum = None
def forward(self, x, y):
''' The inputs x, y are output data from the quantized and floating-point modules:
x is from the quantized module, y is from the floating-point module
'''
if x.is_quantized:
x = x.dequantize()
self.count += 1
if self.stats["quantized"] is None:
self.stats["quantized"] = x
self.quant_sum = x
else:
self.quant_sum += x
self.stats["quantized"] = self.quant_sum / self.count
if self.stats["float"] is None:
self.stats["float"] = y
self.float_sum = y
else:
self.float_sum += y
self.stats["float"] = self.float_sum / self.count
def clear(self):
self.stats["float"] = None
self.stats["quantized"] = None
self.count = 0
self.float_sum = None
self.quant_sum = None
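# Illustrative sketch (an assumption, not library code): the logger keeps running means
# of the quantized (x) and float (y) outputs passed to it.
def _example_mean_shadow_logger():
    logger = MeanShadowLogger()
    logger(torch.ones(2, 3), torch.zeros(2, 3))
    logger(3 * torch.ones(2, 3), torch.zeros(2, 3))
    return logger.stats["quantized"], logger.stats["float"]  # means of 2.0 and 0.0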
def bias_correction(float_model, quantized_model, img_data, target_modules=_supported_modules_quantized, neval_batches=None):
''' Using the numeric suite Shadow module, the expected outputs of the floating-point and quantized modules
are recorded. Using that data, the bias of supported modules is shifted to compensate for the drift caused
by quantization
Paper reference: https://arxiv.org/pdf/1906.04721.pdf (Section 4.2)
Args:
float_model: a trained model that serves as a reference to what bias correction should aim for
quantized_model: quantized form of float_model that bias correction is to be applied to
img_data: calibration data to estimate the expected output (used to find quantization error)
target_modules: specifies what submodules in quantized_model need bias correction (can be extended to
unquantized submodules)
neval_batches: a cap on the number of batches used for estimating the expected output
'''
ns.prepare_model_with_stubs(float_model, quantized_model, _supported_modules, MeanShadowLogger)
uncorrected_modules = {}
for name, submodule in quantized_model.named_modules():
if type(submodule) in target_modules:
uncorrected_modules[name] = submodule
for uncorrected_module in uncorrected_modules:
quantized_submodule = get_module(quantized_model, uncorrected_module)
bias = get_param(quantized_submodule, 'bias')
if bias is not None:
count = 0
for data in img_data:
quantized_model(data[0])
count += 1
if count == neval_batches:
break
ob_dict = ns.get_logger_dict(quantized_model)
parent_name, _ = parent_child_names(uncorrected_module)
float_data = ob_dict[parent_name + '.stats']['float']
quant_data = ob_dict[parent_name + '.stats']['quantized']
# math for expected_error
quantization_error = quant_data - float_data
dims = list(range(quantization_error.dim()))
# Note: we don't want to take the mean over the output channel dimension
dims.remove(1)
expected_error = torch.mean(quantization_error, dims)
updated_bias = bias.data - expected_error
bias.data = updated_bias
# Resets the data contained in the loggers
for name, submodule in quantized_model.named_modules():
if isinstance(submodule, MeanShadowLogger):
submodule.clear()
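# Illustrative usage sketch (assumptions: `float_model` is the calibrated float network,
# `quantized_model` is its statically quantized copy, and `img_data` is an iterable of
# (image, label) batches; all three names are hypothetical). bias_correction mutates
# quantized_model in place.
def _example_bias_correction(float_model, quantized_model, img_data):
    bias_correction(float_model, quantized_model, img_data, neval_batches=4)
    return quantized_model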
|
pytorch-master
|
torch/ao/quantization/_correct_bias.py
|
import copy
import itertools
import warnings
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.nn.intrinsic import _FusedModule
from torch.ao.quantization.quantization_mappings import (
get_default_dynamic_quant_module_mappings,
get_default_static_quant_module_mappings,
get_default_static_quant_reference_module_mappings,
get_default_qat_module_mappings,
get_default_qconfig_propagation_list,
no_observer_set,
_has_special_act_post_process,
_get_special_act_post_process,
)
from .utils import get_qparam_dict, has_no_children_ignoring_parametrizations
from torch.ao.quantization.stubs import DeQuantStub, QuantWrapper
from torch.ao.quantization.qconfig import (
add_module_to_qconfig_obs_ctr,
default_dynamic_qconfig,
float16_dynamic_qconfig,
float_qparams_weight_only_qconfig,
float_qparams_weight_only_qconfig_4bit,
activation_is_memoryless)
from torch.nn.utils.parametrize import type_before_parametrizations
_DEFAULT_CUSTOM_CONFIG_DICT = {
'float_to_observed_custom_module_class': {
nn.LSTM: nn.quantizable.LSTM,
nn.MultiheadAttention: nn.quantizable.MultiheadAttention,
},
'observed_to_quantized_custom_module_class': {
nn.quantizable.LSTM: nn.quantized.LSTM,
nn.quantizable.MultiheadAttention: nn.quantized.MultiheadAttention,
}
}
def get_default_custom_config_dict():
r"""Defines the default custom config dict.
"""
return _DEFAULT_CUSTOM_CONFIG_DICT
def is_activation_post_process(module):
return (isinstance(module, torch.ao.quantization.ObserverBase) or
isinstance(module, torch.ao.quantization.FakeQuantizeBase))
def _propagate_qconfig_helper(module, qconfig_dict,
qconfig_parent=None, prefix='', prepare_custom_config_dict=None):
r"""This is a helper function for `propagate_qconfig_`
Args:
module: input module
qconfig_dict: dictionary that maps from name of submodule to quantization
configuration
qconfig_parent: quantization config of the parent module; we will fall back to
this config when there is no config specified for the current
module
prefix: corresponding prefix of the current module, used as key in
qconfig_dict
prepare_custom_config_dict: dictionary for custom handling of modules
see docs for :func:`~torch.ao.quantization.prepare_fx`
Return:
None, module is modified inplace with qconfig attached
"""
module_qconfig = qconfig_dict.get(type_before_parametrizations(module), qconfig_parent)
module_qconfig = qconfig_dict.get(prefix, module_qconfig)
module_qconfig = getattr(module, 'qconfig', module_qconfig)
torch.ao.quantization.qconfig.assert_valid_qconfig(module_qconfig, module)
qconfig_with_device_check = add_module_to_qconfig_obs_ctr(module_qconfig, module)
module.qconfig = qconfig_with_device_check
for name, child in module.named_children():
module_prefix = prefix + '.' + name if prefix else name
# do not propagate qconfig to child if child is non-traceable
if prepare_custom_config_dict is None or not (
name in prepare_custom_config_dict.get("non_traceable_module_name", [])
or type(child) in prepare_custom_config_dict.get("non_traceable_module_class", [])
):
_propagate_qconfig_helper(
child, qconfig_dict, qconfig_with_device_check, module_prefix
)
def propagate_qconfig_(module, qconfig_dict=None, prepare_custom_config_dict=None):
r"""Propagate qconfig through the module hierarchy and assign `qconfig`
attribute on each leaf module
Args:
module: input module
qconfig_dict: dictionary that maps from name or type of submodule to
quantization configuration, qconfig applies to all submodules of a
given module unless a qconfig for the submodule is specified (when
the submodule already has a qconfig attribute)
prepare_custom_config_dict: dictionary for custom handling of modules
see docs for :func:`~torch.ao.quantization.prepare_fx`
Return:
None, module is modified inplace with qconfig attached
"""
if qconfig_dict is None:
qconfig_dict = {}
if prepare_custom_config_dict is None:
prepare_custom_config_dict = {}
_propagate_qconfig_helper(module, qconfig_dict, prepare_custom_config_dict=prepare_custom_config_dict)
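# A minimal usage sketch (hypothetical example, illustration only): attach a
# qconfig to the root of a toy model and let propagate_qconfig_ copy it onto
# every submodule, which add_observer_ later consumes.
def _example_propagate_qconfig():
    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    model.qconfig = torch.ao.quantization.default_qconfig
    propagate_qconfig_(model)
    # every submodule now carries the qconfig inherited from its parent
    assert all(hasattr(m, 'qconfig') for m in model.modules())
    return model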
def _observer_forward_hook(self, input, output):
r"""Forward hook that calls observer on the output
"""
return self.activation_post_process(output)
def _observer_forward_pre_hook(self, input):
r"""Forward pre hook that calls observer on the output
"""
return self.activation_post_process(input[0])
def register_activation_post_process_hook(module, pre_hook=False):
assert hasattr(module, 'activation_post_process'), \
'Expect activation_post_process attribute already attached to the module'
if pre_hook:
handle = module.register_forward_pre_hook(_observer_forward_pre_hook)
module._forward_pre_hooks.move_to_end(handle.id, last=False)
else:
handle = module.register_forward_hook(_observer_forward_hook)
module._forward_hooks.move_to_end(handle.id, last=False)
def add_observer_(module, qconfig_propagation_list=None, non_leaf_module_list=None, device=None, custom_module_class_mapping=None):
r"""Add observer for the leaf child of the module.
This function insert observer module to all leaf child module that
has a valid qconfig attribute.
Args:
module: input module with qconfig attributes for all the leaf modules that we want to quantize
qconfig_propagation_list: a list of quantizable modules that will have observers added to them
if they are leaf nodes
device: parent device, if any
        non_leaf_module_list: list of non-leaf modules we want to add observers to
Return:
None, module is modified inplace with added observer modules and forward_hooks
"""
if qconfig_propagation_list is None:
qconfig_propagation_list = get_default_qconfig_propagation_list()
if custom_module_class_mapping is None:
custom_module_class_mapping = {}
# respect device affinity when adding observers
if device is None:
devices = get_unique_devices_(module)
assert len(devices) <= 1, (
"add_observer_ only works with cpu or single-device CUDA modules, "
"but got devices {}".format(devices)
)
device = next(iter(devices)) if len(devices) > 0 else None
def get_activation_post_process(qconfig, device, special_act_post_process=None):
activation = qconfig.activation() if special_act_post_process is None else special_act_post_process()
if device is not None:
activation.to(device)
return activation
def needs_observation(m):
return hasattr(m, 'qconfig') and m.qconfig is not None
def insert_activation_post_process(m, special_act_post_process=None):
""" Adds an activation post process module and register
a pre or post hook that calls the module
"""
# We don't insert observer/fake_quantize for DeQuantStub
if needs_observation(m) and not isinstance(m, DeQuantStub):
# observer and hook will be gone after we swap the module
m.add_module('activation_post_process', get_activation_post_process(
m.qconfig, device, special_act_post_process))
# Register observer as the first entry in the hook list
# All post forward hooks are preserved and will be executed after the observer before convert
register_activation_post_process_hook(m, pre_hook=activation_is_memoryless(m.qconfig))
for name, child in module.named_children():
# TODO remove Dropout special after codebase stable
if type_before_parametrizations(child) in [nn.Dropout]:
continue
elif type_before_parametrizations(child) in [nnq.FloatFunctional, nnq.QFunctional]:
if needs_observation(child):
child.activation_post_process = get_activation_post_process(child.qconfig, device)
elif isinstance(child, _FusedModule):
            # activation_post_process are now added directly to nn.Sequential/_FusedModule
if needs_observation(child):
insert_activation_post_process(child)
elif _has_special_act_post_process(child):
special_act_post_process = _get_special_act_post_process(child)
insert_activation_post_process(child, special_act_post_process)
elif non_leaf_module_list is not None and type_before_parametrizations(child) in non_leaf_module_list:
if needs_observation(child):
insert_activation_post_process(child)
elif needs_observation(child) and type_before_parametrizations(child) in custom_module_class_mapping:
observed_child = custom_module_class_mapping[type_before_parametrizations(child)].from_float(child)
setattr(module, name, observed_child)
# TODO: These are the modules that cannot be observed
# Once there are more, we should move them to a separate list
if custom_module_class_mapping[type_before_parametrizations(child)] not in no_observer_set():
insert_activation_post_process(observed_child)
else:
add_observer_(child, qconfig_propagation_list, non_leaf_module_list, device, custom_module_class_mapping)
# Insert observers only for leaf nodes, note that this observer is for
    # the output of the module; the input is observed by the QuantStub
if has_no_children_ignoring_parametrizations(module) and not isinstance(module, torch.nn.Sequential) \
and type_before_parametrizations(module) in qconfig_propagation_list:
insert_activation_post_process(module)
def get_unique_devices_(module):
return {p.device for p in module.parameters()} | \
{p.device for p in module.buffers()}
def add_quant_dequant(module):
r"""Wrap the leaf child module in QuantWrapper if it has a valid qconfig
Note that this function will modify the children of module inplace and it
can return a new module which wraps the input module as well.
Args:
module: input module with qconfig attributes for all the leaf modules
that we want to quantize
Return:
Either the inplace modified module with submodules wrapped in
`QuantWrapper` based on qconfig or a new `QuantWrapper` module which
wraps the input module, the latter case only happens when the input
module is a leaf module and we want to quantize it.
"""
if has_no_children_ignoring_parametrizations(module) and hasattr(module, 'qconfig') and module.qconfig:
return QuantWrapper(module)
for name, child in module.named_children():
module._modules[name] = add_quant_dequant(child)
return module
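# A minimal usage sketch (hypothetical example, illustration only): wrapping a
# leaf module that carries a qconfig in a QuantWrapper so quant/dequant stubs
# surround it. The Linear layer here is a toy stand-in.
def _example_add_quant_dequant():
    m = nn.Linear(4, 4)
    m.qconfig = torch.ao.quantization.default_qconfig
    wrapped = add_quant_dequant(m)
    # a leaf module with a qconfig comes back wrapped in QuantWrapper
    assert isinstance(wrapped, QuantWrapper)
    return wrapped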
def prepare(model, inplace=False, allow_list=None,
observer_non_leaf_module_list=None,
prepare_custom_config_dict=None):
r"""Prepares a copy of the model for quantization calibration or quantization-aware training.
Quantization configuration should be assigned preemptively
to individual submodules in `.qconfig` attribute.
The model will be attached with observer or fake quant modules, and qconfig
will be propagated.
Args:
`model`: input model to be modified in-place
`inplace`: carry out model transformations in-place, the original module is mutated
`allow_list`: list of quantizable modules
`observer_non_leaf_module_list`: list of non-leaf modules we want to add observer
`prepare_custom_config_dict`: customization configuration dictionary for prepare function
.. code-block:: python
# Example of prepare_custom_config_dict:
prepare_custom_config_dict = {
# user will manually define the corresponding observed
# module class which has a from_float class method that converts
# float custom module to observed custom module
"float_to_observed_custom_module_class": {
CustomModule: ObservedCustomModule
}
}
"""
torch._C._log_api_usage_once("quantization_api.quantize.prepare")
if prepare_custom_config_dict is None:
prepare_custom_config_dict = get_default_custom_config_dict()
custom_module_class_mapping = prepare_custom_config_dict.get("float_to_observed_custom_module_class", {})
if not inplace:
model = copy.deepcopy(model)
# TODO: remove allow_list
qconfig_propagation_list = allow_list
if allow_list is None:
qconfig_propagation_list = get_default_qconfig_propagation_list()
propagate_qconfig_(model, qconfig_dict=None)
# sanity check common API misusage
if not any(hasattr(m, 'qconfig') and m.qconfig for m in model.modules()):
warnings.warn("None of the submodule got qconfig applied. Make sure you "
"passed correct configuration through `qconfig_dict` or "
"by assigning the `.qconfig` attribute directly on submodules")
add_observer_(
model, qconfig_propagation_list, observer_non_leaf_module_list,
custom_module_class_mapping=custom_module_class_mapping)
return model
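# A minimal usage sketch (hypothetical example, illustration only): preparing an
# eval-mode model for post-training static quantization. The toy model and the
# 'fbgemm' qconfig below are illustrative choices, not requirements of prepare().
def _example_prepare():
    model = nn.Sequential(torch.ao.quantization.QuantStub(),
                          nn.Linear(4, 4),
                          torch.ao.quantization.DeQuantStub())
    model.eval()
    model.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
    prepared = prepare(model)
    # the Linear child now carries an observer registered as activation_post_process
    assert hasattr(prepared[1], 'activation_post_process')
    return prepared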
def _remove_activation_post_process(module):
# TODO: maybe we should change activation_post_process to _activation_post_process
# to prevent it from being used by user
if hasattr(module, 'activation_post_process') and \
is_activation_post_process(module.activation_post_process):
delattr(module, 'activation_post_process')
    # remove activation_post_process pre and post hooks
def remove_hooks(pre_hook=False):
hook_map = module._forward_pre_hooks if pre_hook else module._forward_hooks
observer_hook = _observer_forward_pre_hook if pre_hook else _observer_forward_hook
handle_ids_to_remove = set()
for handle_id, hook_fn in hook_map.items():
if hook_fn is observer_hook:
handle_ids_to_remove.add(handle_id)
for handle_id in handle_ids_to_remove:
hook_map.pop(handle_id)
remove_hooks(pre_hook=True)
remove_hooks(pre_hook=False)
# TODO: rename to something more general
def _remove_qconfig(module):
r"""Clean up the qconfig left in the module so that new qconfig can be
propagated.
Args:
module: module to be cleaned up
"""
for child in module.children():
_remove_qconfig(child)
if hasattr(module, "qconfig"):
del module.qconfig
_remove_activation_post_process(module)
def quantize(model, run_fn, run_args, mapping=None, inplace=False):
r"""Quantize the input float model with post training static quantization.
First it will prepare the model for calibration, then it calls
    `run_fn`, which runs the calibration step; after that we will
convert the model to a quantized model.
Args:
model: input float model
run_fn: a calibration function for calibrating the prepared model
run_args: positional arguments for `run_fn`
inplace: carry out model transformations in-place, the original module is mutated
mapping: correspondence between original module types and quantized counterparts
Return:
Quantized model.
"""
torch._C._log_api_usage_once("quantization_api.quantize.quantize")
if mapping is None:
mapping = get_default_static_quant_module_mappings()
if not inplace:
model = copy.deepcopy(model)
model.eval()
prepare(model, inplace=True)
run_fn(model, *run_args)
convert(model, mapping, inplace=True)
return model
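# A minimal usage sketch (hypothetical example, illustration only): the
# prepare -> calibrate -> convert flow that quantize() wraps. The calibration
# function and random data below are placeholders chosen only for illustration.
def _example_quantize():
    float_model = nn.Sequential(torch.ao.quantization.QuantStub(),
                                nn.Linear(4, 4),
                                torch.ao.quantization.DeQuantStub())
    float_model.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')

    def calibrate(m, data):
        with torch.no_grad():
            for x in data:
                m(x)

    data = [torch.randn(2, 4) for _ in range(4)]
    quantized_model = quantize(float_model, calibrate, [data])
    return quantized_model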
def quantize_dynamic(model, qconfig_spec=None, dtype=torch.qint8,
mapping=None, inplace=False):
r"""Converts a float model to dynamic (i.e. weights-only) quantized model.
Replaces specified modules with dynamic weight-only quantized versions and output the quantized model.
For simplest usage provide `dtype` argument that can be float16 or qint8. Weight-only quantization
by default is performed for layers with large weights size - i.e. Linear and RNN variants.
Fine grained control is possible with `qconfig` and `mapping` that act similarly to `quantize()`.
If `qconfig` is provided, the `dtype` argument is ignored.
Args:
model: input model
qconfig_spec: Either:
- A dictionary that maps from name or type of submodule to quantization
configuration, qconfig applies to all submodules of a given
module unless qconfig for the submodules are specified (when the
submodule already has qconfig attribute). Entries in the dictionary
need to be QConfig instances.
- A set of types and/or submodule names to apply dynamic quantization to,
in which case the `dtype` argument is used to specify the bit-width
inplace: carry out model transformations in-place, the original module is mutated
mapping: maps type of a submodule to a type of corresponding dynamically quantized version
with which the submodule needs to be replaced
"""
torch._C._log_api_usage_once("quantization_api.quantize.quantize_dynamic")
if qconfig_spec is None:
if dtype == torch.qint8:
qconfig_spec = {
nn.Linear : default_dynamic_qconfig,
nn.LSTM : default_dynamic_qconfig,
nn.GRU : default_dynamic_qconfig,
nn.LSTMCell : default_dynamic_qconfig,
nn.RNNCell : default_dynamic_qconfig,
nn.GRUCell : default_dynamic_qconfig,
}
elif dtype == torch.float16:
qconfig_spec = {
nn.Linear : float16_dynamic_qconfig,
nn.LSTM : float16_dynamic_qconfig,
nn.GRU : float16_dynamic_qconfig,
nn.LSTMCell : float16_dynamic_qconfig,
nn.RNNCell : float16_dynamic_qconfig,
nn.GRUCell : float16_dynamic_qconfig,
}
elif dtype == torch.quint8:
qconfig_spec = {
nn.EmbeddingBag : float_qparams_weight_only_qconfig,
nn.Embedding : float_qparams_weight_only_qconfig,
}
elif dtype == torch.quint4x2:
qconfig_spec = {
nn.EmbeddingBag : float_qparams_weight_only_qconfig_4bit,
}
else:
raise ValueError(
"Don't know how to quantize with default settings for {}. Provide full qconfig please".format(dtype))
elif isinstance(qconfig_spec, set):
if dtype is torch.qint8:
default_qconfig = default_dynamic_qconfig
elif dtype is torch.float16:
default_qconfig = float16_dynamic_qconfig
elif dtype is torch.quint8:
default_qconfig = float_qparams_weight_only_qconfig
elif dtype is torch.quint4x2:
default_qconfig = float_qparams_weight_only_qconfig_4bit
else:
raise RuntimeError('Unknown dtype specified for quantize_dynamic: ', str(dtype))
qconfig_spec = dict(zip(qconfig_spec, itertools.repeat(default_qconfig)))
if mapping is None:
mapping = get_default_dynamic_quant_module_mappings()
if not inplace:
model = copy.deepcopy(model)
model.eval()
propagate_qconfig_(model, qconfig_spec)
convert(model, mapping, inplace=True)
return model
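# A minimal usage sketch (hypothetical example, illustration only): dynamic
# (weights-only) quantization of the Linear layers in a toy model. No
# calibration data is needed; activations are quantized on the fly at runtime.
def _example_quantize_dynamic():
    float_model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
    dq_model = quantize_dynamic(float_model, {nn.Linear}, dtype=torch.qint8)
    # the Linear submodules are swapped for dynamically quantized counterparts
    assert type(dq_model[0]) is not nn.Linear
    return dq_model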
def prepare_qat(model, mapping=None, inplace=False):
r"""
    Prepares a copy of the model for quantization-aware training and
    converts its modules to their QAT (fake-quantized) counterparts.
Quantization configuration should be assigned preemptively
to individual submodules in `.qconfig` attribute.
Args:
model: input model to be modified in-place
mapping: dictionary that maps float modules to quantized modules to be
replaced.
inplace: carry out model transformations in-place, the original module
is mutated
"""
torch._C._log_api_usage_once("quantization_api.quantize.prepare_qat")
assert model.training, "prepare_qat only works on models in training mode"
if mapping is None:
mapping = get_default_qat_module_mappings()
if not inplace:
model = copy.deepcopy(model)
propagate_qconfig_(model, qconfig_dict=None)
convert(model, mapping=mapping, inplace=True, remove_qconfig=False)
prepare(model, observer_non_leaf_module_list=set(mapping.values()), inplace=True)
return model
def quantize_qat(model, run_fn, run_args, inplace=False):
r"""Do quantization aware training and output a quantized model
Args:
model: input model
run_fn: a function for evaluating the prepared model, can be a
function that simply runs the prepared model or a training
loop
run_args: positional arguments for `run_fn`
Return:
Quantized model.
"""
torch._C._log_api_usage_once("quantization_api.quantize.quantize_qat")
if not inplace:
model = copy.deepcopy(model)
model.train()
prepare_qat(model, inplace=True)
run_fn(model, *run_args)
convert(model, inplace=True)
return model
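# A minimal usage sketch (hypothetical example, illustration only): a tiny
# quantization-aware training loop driven through quantize_qat(). The model,
# optimizer, loss and random data are placeholders for illustration only.
def _example_quantize_qat():
    model = nn.Sequential(torch.ao.quantization.QuantStub(),
                          nn.Linear(4, 4),
                          torch.ao.quantization.DeQuantStub())
    model.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')

    def train_fn(m, data):
        optimizer = torch.optim.SGD(m.parameters(), lr=0.01)
        for x, y in data:
            optimizer.zero_grad()
            loss = torch.nn.functional.mse_loss(m(x), y)
            loss.backward()
            optimizer.step()

    data = [(torch.randn(2, 4), torch.randn(2, 4)) for _ in range(4)]
    return quantize_qat(model, train_fn, [data])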
def convert(
module, mapping=None, inplace=False, remove_qconfig=True,
is_reference=False, convert_custom_config_dict=None):
r"""Converts submodules in input module to a different module according to `mapping`
by calling `from_float` method on the target module class. And remove qconfig at the
end if remove_qconfig is set to True.
Args:
`module`: prepared and calibrated module
`mapping`: a dictionary that maps from source module type to target
module type, can be overwritten to allow swapping user defined
Modules
`inplace`: carry out model transformations in-place, the original module
is mutated
`convert_custom_config_dict`: custom configuration dictionary for convert function
.. code-block:: python
# Example of convert_custom_config_dict:
convert_custom_config_dict = {
# user will manually define the corresponding quantized
# module class which has a from_observed class method that converts
# observed custom module to quantized custom module
"observed_to_quantized_custom_module_class": {
ObservedCustomModule: QuantizedCustomModule
}
}
"""
torch._C._log_api_usage_once("quantization_api.quantize.convert")
if not inplace:
module = copy.deepcopy(module)
_convert(
module, mapping, inplace=True, is_reference=is_reference,
convert_custom_config_dict=convert_custom_config_dict)
if remove_qconfig:
_remove_qconfig(module)
return module
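# A minimal usage sketch (hypothetical example, illustration only): converting a
# prepared and calibrated model in place, which swaps the observed float modules
# for their quantized counterparts from the default static mapping.
def _example_convert():
    model = nn.Sequential(torch.ao.quantization.QuantStub(),
                          nn.Linear(4, 4),
                          torch.ao.quantization.DeQuantStub())
    model.eval()
    model.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')
    prepare(model, inplace=True)
    model(torch.randn(2, 4))  # a single calibration pass, for illustration only
    convert(model, inplace=True)
    # nn.Linear has been replaced by its quantized counterpart
    assert isinstance(model[1], nnq.Linear)
    return model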
def _convert(
module, mapping=None, inplace=False,
is_reference=False, convert_custom_config_dict=None):
r"""Converts submodules in input module to a different module according to `mapping`
by calling `from_float` method on the target module class
Args:
module: input module
mapping: a dictionary that maps from source module type to target
module type, can be overwritten to allow swapping user defined
Modules
inplace: carry out model transformations in-place, the original module
is mutated
is_reference: a flag to enable quantized reference module
"""
if mapping is None:
mapping = get_default_static_quant_reference_module_mappings() if is_reference \
else get_default_static_quant_module_mappings()
if convert_custom_config_dict is None:
convert_custom_config_dict = get_default_custom_config_dict()
custom_module_class_mapping = convert_custom_config_dict.get("observed_to_quantized_custom_module_class", {})
if not inplace:
module = copy.deepcopy(module)
reassign = {}
for name, mod in module.named_children():
# both fused modules and observed custom modules are
# swapped as one unit
if not isinstance(mod, _FusedModule) and \
type_before_parametrizations(mod) not in custom_module_class_mapping:
_convert(mod, mapping, True, # inplace
is_reference, convert_custom_config_dict)
reassign[name] = swap_module(mod, mapping, custom_module_class_mapping)
for key, value in reassign.items():
module._modules[key] = value
return module
def swap_module(mod, mapping, custom_module_class_mapping):
r"""Swaps the module if it has a quantized counterpart and it has an
`observer` attached.
Args:
mod: input module
mapping: a dictionary that maps from nn module to nnq module
Return:
The corresponding quantized module of `mod`
"""
new_mod = mod
if hasattr(mod, 'qconfig') and mod.qconfig is not None:
swapped = False
if type_before_parametrizations(mod) in custom_module_class_mapping:
new_mod = custom_module_class_mapping[type_before_parametrizations(mod)].from_observed(mod)
swapped = True
elif type_before_parametrizations(mod) in mapping:
qmod = mapping[type_before_parametrizations(mod)]
if hasattr(qmod, '_IS_REFERENCE') and qmod._IS_REFERENCE:
assert mod.qconfig is not None
weight_post_process = mod.qconfig.weight()
weight_post_process(mod.weight)
weight_qparams = get_qparam_dict(weight_post_process)
new_mod = qmod.from_float(mod, weight_qparams)
else:
new_mod = qmod.from_float(mod)
swapped = True
if swapped:
# Preserve module's pre forward hooks. They'll be called on quantized input
for pre_hook_fn in mod._forward_pre_hooks.values():
new_mod.register_forward_pre_hook(pre_hook_fn)
# Preserve module's post forward hooks except _observer_forward_hook
# After convert they'll work with quantized output
for hook_fn in mod._forward_hooks.values():
if hook_fn is not _observer_forward_hook:
new_mod.register_forward_hook(hook_fn)
# respect device affinity when swapping modules
devices = get_unique_devices_(mod)
assert len(devices) <= 1, (
"swap_module only works with cpu or single-device CUDA modules, "
"but got devices {}".format(devices)
)
device = next(iter(devices)) if len(devices) > 0 else None
if device:
new_mod.to(device)
return new_mod
def get_observer_dict(mod, target_dict, prefix=""):
r"""Traverse the modules and save all observers into dict.
This is mainly used for quantization accuracy debug
Args:
mod: the top module we want to save all observers
prefix: the prefix for the current module
target_dict: the dictionary used to save all the observers
"""
def get_prefix(prefix):
return prefix if prefix == "" else prefix + '.'
if hasattr(mod, 'activation_post_process'):
target_dict[get_prefix(prefix) + 'activation_post_process'] = mod.activation_post_process
for name, child in mod.named_children():
module_prefix = get_prefix(prefix) + name if prefix else name
get_observer_dict(child, target_dict, module_prefix)
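# A minimal usage sketch (hypothetical example, illustration only): collecting
# every observer of a prepared toy model into a flat dict keyed by its dotted
# module path, e.g. to inspect calibration statistics.
def _example_get_observer_dict():
    model = nn.Sequential(nn.Linear(4, 4))
    model.qconfig = torch.ao.quantization.default_qconfig
    prepare(model, inplace=True)
    observers = {}
    get_observer_dict(model, observers)
    # keys look like '0.activation_post_process'
    return observers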
|
pytorch-master
|
torch/ao/quantization/quantize.py
|
import torch
import copy
from typing import Dict, Any
_supported_types = {torch.nn.Conv2d, torch.nn.Linear}
_supported_intrinsic_types = {torch.nn.intrinsic.ConvReLU2d, torch.nn.intrinsic.LinearReLU}
_all_supported_types = _supported_types.union(_supported_intrinsic_types)
def set_module_weight(module, weight) -> None:
if type(module) in _supported_types:
module.weight = torch.nn.Parameter(weight)
else:
module[0].weight = torch.nn.Parameter(weight)
def set_module_bias(module, bias) -> None:
if type(module) in _supported_types:
module.bias = torch.nn.Parameter(bias)
else:
module[0].bias = torch.nn.Parameter(bias)
def get_module_weight(module):
if type(module) in _supported_types:
return module.weight
else:
return module[0].weight
def get_module_bias(module):
if type(module) in _supported_types:
return module.bias
else:
return module[0].bias
def max_over_ndim(input, axis_list, keepdim=False):
    ''' Applies 'torch.max' over the given axes
'''
axis_list.sort(reverse=True)
for axis in axis_list:
input, _ = input.max(axis, keepdim)
return input
def min_over_ndim(input, axis_list, keepdim=False):
    ''' Applies 'torch.min' over the given axes
'''
axis_list.sort(reverse=True)
for axis in axis_list:
input, _ = input.min(axis, keepdim)
return input
def channel_range(input, axis=0):
''' finds the range of weights associated with a specific channel
'''
size_of_tensor_dim = input.ndim
axis_list = list(range(size_of_tensor_dim))
axis_list.remove(axis)
mins = min_over_ndim(input, axis_list)
maxs = max_over_ndim(input, axis_list)
    assert mins.size(0) == input.size(axis), "Dimensions of the resultant channel range do not match the size of the requested axis"
return maxs - mins
def cross_layer_equalization(module1, module2, output_axis=0, input_axis=1):
    ''' Given two adjacent modules, their weights are scaled such that
    the ranges of the first module's output channels are equal to the
    ranges of the second module's input channels
'''
if type(module1) not in _all_supported_types or type(module2) not in _all_supported_types:
raise ValueError("module type not supported:", type(module1), " ", type(module2))
weight1 = get_module_weight(module1)
weight2 = get_module_weight(module2)
if weight1.size(output_axis) != weight2.size(input_axis):
raise TypeError("Number of output channels of first arg do not match \
number input channels of second arg")
bias = get_module_bias(module1)
weight1_range = channel_range(weight1, output_axis)
weight2_range = channel_range(weight2, input_axis)
    # producing the scaling factors to be applied
weight2_range += 1e-9
scaling_factors = torch.sqrt(weight1_range / weight2_range)
inverse_scaling_factors = torch.reciprocal(scaling_factors)
bias = bias * inverse_scaling_factors
# formatting the scaling (1D) tensors to be applied on the given argument tensors
# pads axis to (1D) tensors to then be broadcasted
size1 = [1] * weight1.ndim
size1[output_axis] = weight1.size(output_axis)
size2 = [1] * weight2.ndim
size2[input_axis] = weight2.size(input_axis)
scaling_factors = torch.reshape(scaling_factors, size2)
inverse_scaling_factors = torch.reshape(inverse_scaling_factors, size1)
weight1 = weight1 * inverse_scaling_factors
weight2 = weight2 * scaling_factors
set_module_weight(module1, weight1)
set_module_bias(module1, bias)
set_module_weight(module2, weight2)
def equalize(model, paired_modules_list, threshold=1e-4, inplace=True):
    ''' Given a list of adjacent modules within a model, equalization will
    be applied between each pair; this will be repeated until convergence is achieved.
    Keeps a copy of the changing modules from the previous iteration; if the copies
    are not that different from the current modules (as determined by `converged`),
    then the modules have converged enough that further equalizing is not necessary.
    The implementation references section 4.1 of this paper: https://arxiv.org/pdf/1906.04721.pdf
Args:
model: a model (nn.module) that equalization is to be applied on
paired_modules_list: a list of lists where each sublist is a pair of two
submodules found in the model, for each pair the two submodules generally
have to be adjacent in the model to get expected/reasonable results
        threshold: a number used by the converged function to determine what degree of
            similarity between models is necessary for them to be called equivalent
inplace: determines if function is inplace or not
'''
if not inplace:
model = copy.deepcopy(model)
name_to_module : Dict[str, torch.nn.Module] = {}
previous_name_to_module: Dict[str, Any] = {}
name_set = {name for pair in paired_modules_list for name in pair}
for name, module in model.named_modules():
if name in name_set:
name_to_module[name] = module
previous_name_to_module[name] = None
while not converged(name_to_module, previous_name_to_module, threshold):
for pair in paired_modules_list:
previous_name_to_module[pair[0]] = copy.deepcopy(name_to_module[pair[0]])
previous_name_to_module[pair[1]] = copy.deepcopy(name_to_module[pair[1]])
cross_layer_equalization(name_to_module[pair[0]], name_to_module[pair[1]])
return model
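# A minimal usage sketch (hypothetical example, illustration only): equalizing a
# pair of adjacent Linear layers in a toy Sequential model. The pair of names
# ['0', '1'] refers to the two layers below and must follow the model's dataflow.
def _example_equalize():
    model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 4))
    equalized = equalize(model, [['0', '1']], threshold=1e-4, inplace=False)
    # the rescaling is compensated between the two layers, so the float output is preserved
    x = torch.randn(2, 8)
    assert torch.allclose(model(x), equalized(x), atol=1e-4)
    return equalized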
def converged(curr_modules, prev_modules, threshold=1e-4):
    ''' Tests whether the summed norm of the differences between each pair of modules
    is less than the given threshold.
    Takes two dictionaries mapping names to modules; the set of names for each dictionary
    should be the same. Looping over the set of names, for each name take the difference
    between the associated modules in each dictionary
'''
if curr_modules.keys() != prev_modules.keys():
raise ValueError("The keys to the given mappings must have the same set of names of modules")
summed_norms = torch.tensor(0.)
if None in prev_modules.values():
return False
for name in curr_modules.keys():
curr_weight = get_module_weight(curr_modules[name])
prev_weight = get_module_weight(prev_modules[name])
difference = curr_weight.sub(prev_weight)
summed_norms += torch.norm(difference)
return bool(summed_norms < threshold)
|
pytorch-master
|
torch/ao/quantization/_equalize.py
|
# TODO: the name of this file is probably confusing, remove this file and move the type
# definitions to somewhere else, e.g. to .utils
from typing import Any, Tuple, Union
from torch.fx import Node
from .utils import Pattern # noqa: F401
NodePattern = Union[Tuple[Node, Node], Tuple[Node, Tuple[Node, Node]], Any]
# This is the Quantizer class instance from torch/quantization/fx/quantize.py.
# Define separately to prevent circular imports.
# TODO(future PR): improve this.
QuantizerCls = Any
__all__ = [
"Pattern",
"NodePattern",
"QuantizerCls",
]
|
pytorch-master
|
torch/ao/quantization/quantization_types.py
|
"""
This module implements modules which are used to perform fake quantization
during QAT.
"""
import torch
from torch.nn import Module
from torch.ao.quantization.observer import (
MovingAverageMinMaxObserver,
HistogramObserver,
MovingAveragePerChannelMinMaxObserver,
FixedQParamsObserver,
default_fixed_qparams_range_0to1_observer,
default_fixed_qparams_range_neg1to1_observer,
_with_args,
)
import re
from abc import ABC, abstractmethod
from typing import Any, Tuple
def _is_per_channel(qscheme: 'torch.qscheme') -> bool:
return qscheme in [torch.per_channel_symmetric, torch.per_channel_affine, torch.per_channel_affine_float_qparams]
def _is_per_tensor(qscheme: 'torch.qscheme') -> bool:
return qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]
def _is_symmetric_quant(qscheme: 'torch.qscheme') -> bool:
return qscheme in [torch.per_tensor_symmetric, torch.per_channel_symmetric]
def _is_float_qparams(qscheme: 'torch.qscheme') -> bool:
return qscheme in [torch.per_channel_affine_float_qparams, ]
class FakeQuantizeBase(ABC, Module):
r""" Base fake quantize module
Any fake quantize implementation should derive from this class.
Concrete fake quantize module should follow the same API. In forward, they will update
the statistics of the observed Tensor and fake quantize the input. They should also provide a
`calculate_qparams` function that computes the quantization parameters given
the collected statistics.
"""
fake_quant_enabled: torch.Tensor
observer_enabled: torch.Tensor
def __init__(self):
super().__init__()
# fake_quant_enabled and observer_enabled are buffers to support their
# replication in DDP. Data type is uint8 because NCCL does not support
# bool tensors.
self.register_buffer('fake_quant_enabled', torch.tensor([1], dtype=torch.uint8))
self.register_buffer('observer_enabled', torch.tensor([1], dtype=torch.uint8))
@abstractmethod
def forward(self, x):
pass
@abstractmethod
def calculate_qparams(self, **kwargs):
pass
@torch.jit.export
def enable_fake_quant(self, enabled: bool = True) -> None:
self.fake_quant_enabled[0] = 1 if enabled else 0
@torch.jit.export
def disable_fake_quant(self):
self.enable_fake_quant(False)
@torch.jit.export
def enable_observer(self, enabled: bool = True) -> None:
self.observer_enabled[0] = 1 if enabled else 0
@torch.jit.export
def disable_observer(self):
self.enable_observer(False)
with_args = classmethod(_with_args)
class FakeQuantize(FakeQuantizeBase):
r""" Simulate the quantize and dequantize operations in training time.
The output of this module is given by::
x_out = (
clamp(round(x/scale + zero_point), quant_min, quant_max) - zero_point
) * scale
* :attr:`scale` defines the scale factor used for quantization.
    * :attr:`zero_point` specifies the quantized value to which 0 in floating point maps
* :attr:`fake_quant_enabled` controls the application of fake quantization on tensors, note that
statistics can still be updated.
* :attr:`observer_enabled` controls statistics collection on tensors
* :attr:`dtype` specifies the quantized dtype that is being emulated with fake-quantization,
allowable values are torch.qint8 and torch.quint8.
Args:
observer (module): Module for observing statistics on input tensors and calculating scale
and zero-point.
observer_kwargs (optional): Arguments for the observer module
Attributes:
activation_post_process (Module): User provided module that collects statistics on the input tensor and
provides a method to calculate scale and zero-point.
"""
scale: torch.Tensor
zero_point: torch.Tensor
def __init__(self, observer=MovingAverageMinMaxObserver, quant_min=None, quant_max=None, **observer_kwargs):
super().__init__()
# Populate quant_min/quant_max to observer_kwargs if valid
if quant_min is not None and quant_max is not None:
assert quant_min <= quant_max, \
'quant_min must be less than or equal to quant_max'
dtype = observer_kwargs.get("dtype", torch.quint8)
if hasattr(observer, "p"):
# In case observer is _PartialWrapper, dtype can be stored in
# observer.p.keywords["dtype"]
dtype = getattr(getattr(observer, "p", {}), "keywords", {}).get(
"dtype", dtype
)
assert torch.iinfo(dtype).min <= quant_min, 'quant_min out of bound'
assert quant_max <= torch.iinfo(dtype).max, 'quant_max out of bound'
observer_kwargs.update({"quant_min": quant_min, "quant_max": quant_max})
self.activation_post_process = observer(**observer_kwargs)
# TODO: keeping self.quant_min/max for BC; remove after a couple releases
# Users should use self.activation_post_process.quant_min
self.quant_min = self.activation_post_process.quant_min
self.quant_max = self.activation_post_process.quant_max
if _is_float_qparams(self.activation_post_process.qscheme):
zero_point_dtype = torch.float
else:
zero_point_dtype = torch.int
self.register_buffer('scale', torch.tensor([1.0], dtype=torch.float))
self.register_buffer('zero_point', torch.tensor([0], dtype=zero_point_dtype))
self.dtype = self.activation_post_process.dtype
self.qscheme = self.activation_post_process.qscheme
self.ch_axis = self.activation_post_process.ch_axis \
if hasattr(self.activation_post_process, 'ch_axis') else -1
assert _is_per_channel(self.qscheme) or \
_is_per_tensor(self.qscheme), \
'Only per channel and per tensor quantization are supported in fake quantize' + \
            ', got qscheme: ' + str(self.qscheme)
self.is_per_channel = _is_per_channel(self.qscheme)
@torch.jit.export
def calculate_qparams(self):
return self.activation_post_process.calculate_qparams()
def forward(self, X):
if self.observer_enabled[0] == 1:
self.activation_post_process(X.detach())
_scale, _zero_point = self.calculate_qparams()
_scale, _zero_point = _scale.to(self.scale.device), _zero_point.to(self.zero_point.device)
if self.scale.shape != _scale.shape:
self.scale.resize_(_scale.shape)
self.zero_point.resize_(_zero_point.shape)
self.scale.copy_(_scale)
self.zero_point.copy_(_zero_point)
if self.fake_quant_enabled[0] == 1:
if self.is_per_channel:
X = torch.fake_quantize_per_channel_affine(
X, self.scale, self.zero_point,
self.ch_axis, self.activation_post_process.quant_min, self.activation_post_process.quant_max)
else:
X = torch.fake_quantize_per_tensor_affine(
X, self.scale, self.zero_point,
self.activation_post_process.quant_min, self.activation_post_process.quant_max)
return X
@torch.jit.export
def extra_repr(self):
return 'fake_quant_enabled={}, observer_enabled={}, ' \
'quant_min={}, quant_max={}, dtype={}, qscheme={}, ch_axis={}, ' \
'scale={}, zero_point={}'.format(
self.fake_quant_enabled, self.observer_enabled,
self.activation_post_process.quant_min, self.activation_post_process.quant_max,
self.dtype, self.qscheme, self.ch_axis, self.scale, self.zero_point)
def _save_to_state_dict(self, destination, prefix, keep_vars):
# We cannot currently register scalar values as buffers, so need to manually
# specify serialization here.
super(FakeQuantize, self)._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'scale'] = self.scale
destination[prefix + 'zero_point'] = self.zero_point
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
        # Removing this function throws an error that the size of the loaded tensor does not match the original size
# i.e., These buffers start out with numel 0 and become numel 1 once they have their first forward pass.
local_state = ['scale', 'zero_point']
for name in local_state:
key = prefix + name
if key in state_dict:
val = state_dict[key]
# Custom handling to allow loading scale and zero_point
# of size N into uninitialized buffers of size 0. The
# buffers are resized here, and the values are copied in
# the default state_dict loading code of the parent.
if name == 'scale':
self.scale.resize_(val.shape)
else:
assert name == 'zero_point'
self.zero_point.resize_(val.shape)
# For torchscript module we need to update the attributes here since we do not
                # call the `_load_from_state_dict` function defined in module.py
if torch.jit.is_scripting():
if name == 'scale':
self.scale.copy_(val)
else:
assert name == 'zero_point'
self.zero_point.copy_(val)
elif strict:
missing_keys.append(key)
super(FakeQuantize, self)._load_from_state_dict(state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
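# A minimal usage sketch (hypothetical example, illustration only): a standalone
# FakeQuantize module that observes a tensor and then emulates quint8
# quantization. The observer settings mirror the defaults and are spelled out
# here only for clarity.
def _example_fake_quantize():
    fq = FakeQuantize(observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255,
                      dtype=torch.quint8, qscheme=torch.per_tensor_affine)
    x = torch.randn(4, 4)
    y = fq(x)  # updates the observer statistics, then fake-quantizes x
    scale, zero_point = fq.calculate_qparams()
    # y stays a float tensor, but its values lie on the emulated quantization grid
    assert y.dtype == torch.float32
    return y, scale, zero_point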
class FixedQParamsFakeQuantize(FakeQuantize):
""" Simulate quantize and dequantize with fixed quantization
parameters in training time. Only per tensor quantization
is supported.
"""
def __init__(self, observer):
super().__init__(observer=observer)
assert type(self.activation_post_process) == FixedQParamsObserver,\
"%s's observer must be a %s" % (self.__class__.__name__, FixedQParamsObserver.__name__)
self._observer_ctr = observer
self.scale = self.activation_post_process.scale
self.zero_point = self.activation_post_process.zero_point
assert _is_per_tensor(self.qscheme), 'Only per tensor quantization is supported' + \
            ' for FixedQParamsFakeQuantize module, got qscheme: ' + str(self.qscheme)
@torch.jit.export
def calculate_qparams(self):
return self.scale, self.zero_point
@torch.jit.export
def extra_repr(self):
return 'fake_quant_enabled={}, observer_enabled={}, scale={}, zero_point={}, ' \
'dtype={}, quant_min={}, quant_max={}, qscheme={}'.format(
self.fake_quant_enabled, self.observer_enabled,
self.scale, self.zero_point, self.dtype,
self.activation_post_process.quant_min, self.activation_post_process.quant_max, self.qscheme)
class FusedMovingAvgObsFakeQuantize(FakeQuantize):
r"""Fused module that is used to observe the input tensor (compute min/max), compute
scale/zero_point and fake_quantize the tensor.
    This module uses a calculation similar to MovingAverageMinMaxObserver for the inputs,
to compute the min/max values in order to compute the scale/zero_point.
The qscheme input in the observer is used to differentiate between symmetric/affine
quantization scheme.
The output of this module is given by
x_out = (clamp(round(x/scale + zero_point), quant_min, quant_max)-zero_point)*scale
Similar to :class:`~torch.ao.quantization.FakeQuantize`, and accepts the same attributes as the
base class.
"""
def __init__(
self,
observer: Any = MovingAverageMinMaxObserver,
quant_min: int = 0,
quant_max: int = 255,
**observer_kwargs: Any
) -> None:
super().__init__(observer, quant_min, quant_max, **observer_kwargs)
assert isinstance(self.activation_post_process, (MovingAverageMinMaxObserver, MovingAveragePerChannelMinMaxObserver)),\
"Fused observer+fake_quant module only works with MovingAverageMinMaxObserver"
self.register_buffer("fake_quant_enabled", torch.tensor([1], dtype=torch.long))
self.register_buffer("observer_enabled", torch.tensor([1], dtype=torch.long))
self.is_symmetric_quant = _is_symmetric_quant(self.activation_post_process.qscheme)
@torch.jit.export
def calculate_qparams(self) -> Tuple[torch.Tensor, torch.Tensor]:
return self.activation_post_process.calculate_qparams()
@torch.jit.export
def extra_repr(self) -> str:
return (
"fake_quant_enabled={}, observer_enabled={}, scale={}, zero_point={}, "
"dtype={}, quant_min={}, quant_max={}, qscheme={}, reduce_range={}".format(
self.fake_quant_enabled,
self.observer_enabled,
self.scale,
self.zero_point,
self.dtype,
self.activation_post_process.quant_min,
self.activation_post_process.quant_max,
self.qscheme,
self.activation_post_process.reduce_range,
)
)
def forward(self, X: torch.Tensor) -> torch.Tensor:
return torch.fused_moving_avg_obs_fake_quant(
X,
self.observer_enabled,
self.fake_quant_enabled,
self.activation_post_process.min_val,
self.activation_post_process.max_val,
self.scale,
self.zero_point,
self.activation_post_process.averaging_constant,
self.activation_post_process.quant_min,
self.activation_post_process.quant_max,
self.ch_axis,
self.is_per_channel,
self.is_symmetric_quant,
)
default_fake_quant = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255,
dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)
"""
Default fake_quant for activations.
"""
default_weight_fake_quant = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=-128, quant_max=127,
dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False)
"""
Default fake_quant for weights.
Observer is memoryless since averaging_constant is 1.
"""
default_dynamic_fake_quant = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255,
dtype=torch.quint8, averaging_constant=1)
"""
Default dynamic fake_quant for activations.
"""
default_fixed_qparams_range_neg1to1_fake_quant = (
FixedQParamsFakeQuantize.with_args(observer=default_fixed_qparams_range_neg1to1_observer)
)
default_fixed_qparams_range_0to1_fake_quant = (
FixedQParamsFakeQuantize.with_args(observer=default_fixed_qparams_range_0to1_observer)
)
# TODO: the following 2 variables are kept for backwards compatibility; remove after a few releases
default_symmetric_fixed_qparams_fake_quant = default_fixed_qparams_range_neg1to1_fake_quant
default_affine_fixed_qparams_fake_quant = default_fixed_qparams_range_0to1_fake_quant
default_per_channel_weight_fake_quant = FakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver,
quant_min=-128,
quant_max=127,
dtype=torch.qint8,
qscheme=torch.per_channel_symmetric,
reduce_range=False,
ch_axis=0)
"""
Default fake_quant for per-channel weights.
Observer is memoryless since averaging_constant is 1.
"""
default_embedding_fake_quant = FakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver,
qscheme=torch.per_channel_affine_float_qparams,
dtype=torch.quint8,
quant_min=0,
quant_max=255,
ch_axis=0,
averaging_constant=1)
"""
Default fake_quant for embeddings.
Observer is memoryless since averaging_constant is 1.
"""
default_embedding_fake_quant_4bit = FakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver,
qscheme=torch.per_channel_affine_float_qparams,
ch_axis=0,
dtype=torch.quint4x2,
averaging_constant=1)
default_histogram_fake_quant = FakeQuantize.with_args(observer=HistogramObserver,
quant_min=0,
quant_max=255,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=True)
"""
Fake_quant for activations using a histogram.
"""
default_fused_act_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255,
dtype=torch.quint8,)
"""
Fused version of `default_fake_quant`, with improved performance.
"""
default_fused_wt_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=-128,
quant_max=127,
dtype=torch.qint8,
qscheme=torch.per_tensor_symmetric)
"""
Fused version of `default_weight_fake_quant`, with improved performance.
"""
default_fused_per_channel_wt_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver,
quant_min=-128,
quant_max=127,
dtype=torch.qint8,
qscheme=torch.per_channel_symmetric)
"""
Fused version of `default_per_channel_weight_fake_quant`, with improved performance.
"""
fused_wt_fake_quant_range_neg_127_to_127 = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=-127,
quant_max=127,
dtype=torch.qint8,
qscheme=torch.per_tensor_symmetric,
eps=2 ** -12)
"""
Fused version of `default_weight_fake_quant`, with the 8-bit values restricted to [-127, +127], excluding -128.
"""
fused_per_channel_wt_fake_quant_range_neg_127_to_127 = FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=-127,
quant_max=127,
dtype=torch.qint8,
qscheme=torch.per_channel_symmetric,
eps=2 ** -12)
"""
Fused version of `default_per_channel_weight_fake_quant`, with the 8-bit values restricted to [-127, +127], excluding -128.
"""
def _is_fake_quant_script_module(mod):
''' Returns true if given mod is an instance of FakeQuantize script module.
'''
if isinstance(mod, torch.jit.RecursiveScriptModule):
# qualified name looks like '__torch__.torch.ao.quantization.fake_quantize.___torch_mangle_2.FakeQuantize'
suffix = mod._c.qualified_name.split('.', 1)[1]
name = re.sub(r'\.___torch_mangle_\d+', '', suffix)
return name == 'torch.ao.quantization.fake_quantize.FakeQuantize' or \
name == 'torch.ao.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize'
return False
def disable_fake_quant(mod):
"""
Disable fake quantization for this module, if applicable. Example usage::
# model is any PyTorch model
model.apply(torch.ao.quantization.disable_fake_quant)
"""
if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):
mod.disable_fake_quant()
def enable_fake_quant(mod):
"""
Enable fake quantization for this module, if applicable. Example usage::
# model is any PyTorch model
model.apply(torch.ao.quantization.enable_fake_quant)
"""
if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):
mod.enable_fake_quant()
def disable_observer(mod):
"""
Disable observation for this module, if applicable. Example usage::
# model is any PyTorch model
model.apply(torch.ao.quantization.disable_observer)
"""
if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):
mod.disable_observer()
def enable_observer(mod):
"""
Enable observation for this module, if applicable. Example usage::
# model is any PyTorch model
model.apply(torch.ao.quantization.enable_observer)
"""
if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):
mod.enable_observer()
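# A minimal usage sketch (hypothetical example, illustration only): a typical
# QAT schedule that freezes observer statistics partway through training, and
# optionally stops simulating quantization noise, by applying the toggles above
# to a prepared QAT model. The toy model and schedule are placeholders.
def _example_toggle_fake_quant():
    model = torch.nn.Sequential(torch.nn.Linear(4, 4))
    model.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')
    model.train()
    torch.ao.quantization.prepare_qat(model, inplace=True)
    # ... train for a few epochs, then freeze the observed ranges ...
    model.apply(disable_observer)
    # ... optionally also disable fake quantization entirely ...
    model.apply(disable_fake_quant)
    return model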
|
pytorch-master
|
torch/ao/quantization/fake_quantize.py
|
from collections import namedtuple
from typing import Optional, Any, Union
import torch
import torch.nn as nn
from torch.ao.quantization.fake_quantize import (
FakeQuantize,
FakeQuantizeBase,
default_fake_quant,
default_dynamic_fake_quant,
default_per_channel_weight_fake_quant,
default_weight_fake_quant,
default_fused_act_fake_quant,
default_fused_wt_fake_quant,
FusedMovingAvgObsFakeQuantize,
default_fused_per_channel_wt_fake_quant,
default_embedding_fake_quant,
default_embedding_fake_quant_4bit,
fused_wt_fake_quant_range_neg_127_to_127,
fused_per_channel_wt_fake_quant_range_neg_127_to_127,
)
from .observer import (
_PartialWrapper,
HistogramObserver,
MovingAverageMinMaxObserver,
NoopObserver,
PlaceholderObserver,
ReuseInputObserver,
default_debug_observer,
default_dynamic_quant_observer,
default_float_qparams_observer,
default_float_qparams_observer_4bit,
default_observer,
default_per_channel_weight_observer,
default_placeholder_observer,
default_weight_observer,
weight_observer_range_neg_127_to_127,
per_channel_weight_observer_range_neg_127_to_127,
default_reuse_input_observer,
ObserverBase,
)
import warnings
import copy
__all__ = [
"QConfig",
# TODO: deprecated, remove
"QConfigDynamic",
"default_qconfig",
"default_debug_qconfig",
"default_per_channel_qconfig",
"default_dynamic_qconfig",
"float16_dynamic_qconfig",
"float16_static_qconfig",
"per_channel_dynamic_qconfig",
"float_qparams_weight_only_qconfig",
"float_qparams_weight_only_qconfig_4bit",
"default_qat_qconfig",
"default_dynamic_qat_qconfig",
"default_weight_only_qconfig",
"default_activation_only_qconfig",
"default_qat_qconfig_v2",
"default_reuse_input_qconfig",
"default_symmetric_qnnpack_qconfig",
"default_per_channel_symmetric_qnnpack_qconfig",
"default_symmetric_qnnpack_qat_qconfig",
"default_per_channel_symmetric_qnnpack_qat_qconfig",
"default_embedding_qat_qconfig",
"default_embedding_qat_qconfig_4bit",
"get_default_qconfig",
"get_default_qat_qconfig",
"get_default_qconfig_dict",
"get_default_qat_qconfig_dict",
"assert_valid_qconfig",
"add_module_to_qconfig_obs_ctr",
"QConfigAny",
"obs_or_fq_ctr_equals",
"qconfig_equals",
"activation_is_memoryless",
"is_reuse_input_qconfig",
]
class QConfig(namedtuple('QConfig', ['activation', 'weight'])):
"""
Describes how to quantize a layer or a part of the network by providing
settings (observer classes) for activations and weights respectively.
Note that QConfig needs to contain observer **classes** (like MinMaxObserver) or a callable that returns
instances on invocation, not the concrete observer instances themselves.
Quantization preparation function will instantiate observers multiple times for each of the layers.
    Observer classes usually have reasonable default arguments, but they can be overwritten with `with_args`
method (that behaves like functools.partial)::
my_qconfig = QConfig(
activation=MinMaxObserver.with_args(dtype=torch.qint8),
weight=default_observer.with_args(dtype=torch.qint8))
"""
def __new__(cls, activation, weight):
# catch common mistakes
if isinstance(activation, nn.Module) or isinstance(weight, nn.Module):
raise ValueError("QConfig received observer instance, please pass observer class instead. " +
"Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
return super(QConfig, cls).__new__(cls, activation, weight)
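# A minimal usage sketch (hypothetical example, illustration only): building a
# custom QConfig from observer classes (or with_args partials) rather than
# instances, then attaching it to a toy module before calling prepare.
def _example_custom_qconfig():
    my_qconfig = QConfig(
        activation=MovingAverageMinMaxObserver.with_args(dtype=torch.quint8),
        weight=default_per_channel_weight_observer)
    module = nn.Linear(4, 4)
    module.qconfig = my_qconfig
    return module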
class QConfigDynamic(namedtuple('QConfigDynamic', ['activation', 'weight'])):
"""
Describes how to dynamically quantize a layer or a part of the network by providing
settings (observer classes) for weights.
It's like QConfig, but for dynamic quantization.
Note that QConfigDynamic needs to contain observer **classes** (like MinMaxObserver) or a callable that returns
instances on invocation, not the concrete observer instances themselves.
Quantization function will instantiate observers multiple times for each of the layers.
    Observer classes usually have reasonable default arguments, but they can be overwritten with `with_args`
method (that behaves like functools.partial)::
my_qconfig = QConfigDynamic(weight=default_observer.with_args(dtype=torch.qint8))
"""
def __new__(cls, activation=torch.nn.Identity, weight=torch.nn.Identity):
# catch common mistakes
if isinstance(weight, nn.Module):
raise ValueError("QConfigDynamic received observer instance, please pass observer class instead. " +
"Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
warnings.warn("QConfigDynamic is going to be deprecated in PyTorch 1.12, please use QConfig instead")
return super(QConfigDynamic, cls).__new__(cls, activation, weight)
default_qconfig = QConfig(activation=default_observer,
weight=default_weight_observer)
"""
Default qconfig configuration.
"""
default_debug_qconfig = QConfig(weight=default_weight_observer,
activation=default_debug_observer)
"""
Default qconfig configuration for debugging.
"""
default_per_channel_qconfig = QConfig(activation=default_observer,
weight=default_per_channel_weight_observer)
"""
Default qconfig configuration for per channel weight quantization.
"""
default_dynamic_qconfig = QConfig(activation=default_dynamic_quant_observer,
weight=default_weight_observer)
"""
Default dynamic qconfig.
"""
float16_dynamic_qconfig = QConfig(activation=PlaceholderObserver.with_args(dtype=torch.float32, compute_dtype=torch.float16),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
"""
Dynamic qconfig with weights quantized to `torch.float16`.
"""
float16_static_qconfig = QConfig(activation=PlaceholderObserver.with_args(dtype=torch.float16),
weight=PlaceholderObserver.with_args(dtype=torch.float16))
"""
Dynamic qconfig with both activations and weights quantized to `torch.float16`.
"""
per_channel_dynamic_qconfig = QConfig(activation=default_dynamic_quant_observer,
weight=default_per_channel_weight_observer)
"""
Dynamic qconfig with weights quantized per channel.
"""
float_qparams_weight_only_qconfig = QConfig(
activation=default_placeholder_observer,
weight=default_float_qparams_observer)
"""
Dynamic qconfig with weights quantized with a floating point zero_point.
"""
float_qparams_weight_only_qconfig_4bit = QConfig(
activation=default_placeholder_observer,
weight=default_float_qparams_observer_4bit)
default_qat_qconfig = QConfig(activation=default_fake_quant,
weight=default_weight_fake_quant)
"""
Default qconfig for QAT.
"""
default_dynamic_qat_qconfig = QConfig(activation=default_dynamic_fake_quant,
weight=default_weight_fake_quant)
"""
Default qconfig for dynamic QAT.
"""
default_weight_only_qconfig = QConfig(activation=torch.nn.Identity,
weight=default_weight_fake_quant)
"""
Default qconfig for quantizing weights only.
"""
default_activation_only_qconfig = QConfig(activation=default_fake_quant,
weight=torch.nn.Identity)
"""
Default qconfig for quantizing activations only.
"""
# QAT config that uses a fused observer + fake quant modules for optimized training performance.
# to modify the activation/weight observers, the default entries in fake_quantize.py can be modified.
default_qat_qconfig_v2 = QConfig(activation=default_fused_act_fake_quant, weight=default_fused_wt_fake_quant)
"""
Fused version of `default_qat_qconfig`, with performance benefits.
"""
default_reuse_input_qconfig = QConfig(activation=default_reuse_input_observer,
weight=NoopObserver)
"""
Default qconfig for operators that reuse the observers from input Tensor, e.g. reshape
"""
def get_default_qconfig(backend='fbgemm', version=0):
"""
Returns the default PTQ qconfig for the specified backend.
Args:
* `backend`: a string representing the target backend. Currently supports `fbgemm`,
`qnnpack` and `onednn`.
Return:
qconfig
"""
if version == 0:
if backend == 'fbgemm':
qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=True),
weight=default_per_channel_weight_observer)
elif backend == 'qnnpack':
qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=False),
weight=default_weight_observer)
elif backend == 'onednn':
qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=False),
weight=default_per_channel_weight_observer)
else:
qconfig = default_qconfig
else:
raise AssertionError("Version number: " + str(version) +
" in get_default_qconfig is not supported. Version number must be 0")
return qconfig
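# A minimal usage sketch (hypothetical example, illustration only): picking the
# default PTQ qconfig for a backend and attaching it to the root of a toy model;
# submodules inherit it during prepare via qconfig propagation.
def _example_get_default_qconfig():
    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    model.qconfig = get_default_qconfig('fbgemm')
    prepared = torch.ao.quantization.prepare(model.eval())
    return prepared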
"""
Default, symmetric PTQ qconfig for the specified backend. And a per_channel
variant of the same.
Symmetric here applies to signed weights with zero point = 0, and additional
value restrictions. The activations are also signed 8-bit integers with this
qconfig.
* Once this change is merged [as of 3/17/22], with backend or qengine =
'qnnpack', some quantized operators with this symmetric qconfig may use
operators from xnnpack library.
        ** Support to use xnnpack ops with the `qnnpack` backend for asymmetric
qconfig (returned by get_default_qconfig()) is not available yet.
* This qconfig uses signed activations and weights. Weights have added
restrictions such as zero point is forced to be 0, making the weights
symmetric, hence the name. And the 8-bit quantized values are
    restricted to [-127, +127], excluding -128.
* xnnpack has a requantization scale value restriction, 0x1p-32 <=
requantization_scale < 256.0 where, `requantization_scale = (input_scale
* kernel_scale) / (output_scale)`. Using this eps (w/ assumed max value
of 256) is to prevent requantization_scale to go below xnnpack lower
threshold.
"""
default_symmetric_qnnpack_qconfig = QConfig(activation=HistogramObserver.with_args(dtype=torch.qint8,
reduce_range=False,
eps=2 ** -12),
weight=weight_observer_range_neg_127_to_127)
default_per_channel_symmetric_qnnpack_qconfig = QConfig(activation=HistogramObserver.with_args(dtype=torch.qint8,
reduce_range=False,
eps=2 ** -12),
weight=per_channel_weight_observer_range_neg_127_to_127)
default_embedding_qat_qconfig = QConfig(activation=NoopObserver.with_args(dtype=torch.float32),
weight=default_embedding_fake_quant)
default_embedding_qat_qconfig_4bit = QConfig(activation=NoopObserver.with_args(dtype=torch.float32),
weight=default_embedding_fake_quant_4bit)
def get_default_qat_qconfig(backend='fbgemm', version=1):
"""
Returns the default QAT qconfig for the specified backend.
Args:
* `backend`: a string representing the target backend. Currently supports `fbgemm`,
`qnnpack` and `onednn`.
* `version`: version, for backwards compatibility. Can be `None` or `1`.
Return:
qconfig
"""
# Histogram observer is too slow for quantization aware training
if version == 0:
if backend == 'fbgemm':
qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255,
reduce_range=True),
weight=default_per_channel_weight_fake_quant)
elif backend == 'qnnpack':
qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255,
reduce_range=False),
weight=default_weight_fake_quant)
elif backend == 'onednn':
qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255),
weight=default_per_channel_weight_fake_quant)
else:
qconfig = default_qat_qconfig
# Use the fused observe + fake_quant modules for doing QAT.
elif version == 1:
if backend == 'fbgemm':
qconfig = QConfig(activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255,
reduce_range=True),
weight=default_fused_per_channel_wt_fake_quant)
elif backend == 'qnnpack':
qconfig = QConfig(activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255,
reduce_range=False),
weight=default_fused_wt_fake_quant)
elif backend == 'onednn':
qconfig = QConfig(activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255),
weight=default_fused_per_channel_wt_fake_quant)
else:
qconfig = default_qat_qconfig_v2
else:
raise AssertionError("Version number: " + str(version) +
"in get_default_qat_qconfig is not supported. Version number must be 0 or 1")
return qconfig
"""
Default symmetric QAT qconfig for qnnpack. And its per channel weight variant.
"""
default_symmetric_qnnpack_qat_qconfig = QConfig(
activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=-128,
quant_max=127,
dtype=torch.qint8,
reduce_range=False,
eps=2 ** -12),
weight=fused_wt_fake_quant_range_neg_127_to_127)
default_per_channel_symmetric_qnnpack_qat_qconfig = QConfig(
activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=-128,
quant_max=127,
dtype=torch.qint8,
reduce_range=False,
eps=2 ** -12),
weight=fused_per_channel_wt_fake_quant_range_neg_127_to_127)
def get_default_qconfig_dict(backend='fbgemm', version=0):
warnings.warn(
"torch.ao.quantization.get_default_qconfig_dict is deprecated and will be removed in "
"a future version. Please use torch.ao.quantization.get_default_qconfig_mapping instead.")
return torch.ao.quantization.get_default_qconfig_mapping(backend, version).to_dict()
def get_default_qat_qconfig_dict(backend='fbgemm', version=1):
warnings.warn(
"torch.ao.quantization.get_default_qat_qconfig_dict is deprecated and will be removed in "
"a future version. Please use torch.ao.quantization.get_default_qat_qconfig_mapping instead.")
return torch.ao.quantization.get_default_qat_qconfig_mapping(backend, version).to_dict()
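# Illustrative migration sketch (not part of the original module): the
# deprecated dict helpers above are assumed to be thin wrappers around the
# QConfigMapping-based APIs, so the two results below should be equivalent.
def _example_qconfig_dict_migration():
    old_style_dict = get_default_qconfig_dict('fbgemm')                               # deprecated
    new_style_mapping = torch.ao.quantization.get_default_qconfig_mapping('fbgemm')   # preferred
    return old_style_dict, new_style_mapping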
def assert_valid_qconfig(qconfig: Optional[QConfig],
mod: torch.nn.Module) -> None:
"""
Verifies that this `qconfig` is valid.
"""
if qconfig is None:
return
is_conv_transpose_mod = (
isinstance(mod, torch.nn.ConvTranspose1d) or
isinstance(mod, torch.nn.ConvTranspose2d) or
isinstance(mod, torch.nn.ConvTranspose3d))
if is_conv_transpose_mod:
if qconfig.weight is None:
# for now, we assume that any qconfig for ConvTranspose without a weight is valid
return
example_observer = qconfig.weight()
is_per_channel = (
isinstance(example_observer, torch.ao.quantization.PerChannelMinMaxObserver) or
isinstance(example_observer, torch.ao.quantization.MovingAveragePerChannelMinMaxObserver)
)
assert not is_per_channel, \
'Per channel weight observer is not supported yet for ConvTranspose{n}d.'
# TODO: remove QConfigAny and replace it with Optional[QConfig]
QConfigAny = Optional[QConfig]
def add_module_to_qconfig_obs_ctr(
qconfig: QConfigAny,
module: Optional[nn.Module]) -> Any:
r"""This is a helper function for use in quantization prepare that updates a qconfig so that
the constructors stored in the qconfig will create observers on the same device that
'module' is on. This is intended to be used when the qconfigs are propagated to each
module in order to avoid potential device alignment issues.
Args:
qconfig: QConfig with obs constructors stored in activation and weight
module: module which the qconfig is related to
Return:
        qconfig: configured so that the observer constructors are set to construct on the same device as module
"""
if module is None or qconfig is None or qconfig._fields != ('activation', 'weight'):
return qconfig
def get_factory_kwargs_based_on_module_device():
assert isinstance(module, torch.nn.Module)
devices = {p.device for p in module.parameters()} | \
{p.device for p in module.buffers()}
device = next(iter(devices)) if len(devices) > 0 else None
return None if device is None else {'device': device}
def configure_constructor_to_put_obs_on_module_device(original_constructor):
try:
# check if constructor can accept factory_kwargs
check = original_constructor.with_args(factory_kwargs=None)
check()
return original_constructor.with_callable_args(factory_kwargs=get_factory_kwargs_based_on_module_device)
except AttributeError: # qconfig doesn't have activation or weight
return original_constructor
except TypeError: # the class doesn't accept factory_kwargs argument
return original_constructor
activation = configure_constructor_to_put_obs_on_module_device(qconfig.activation)
weight = configure_constructor_to_put_obs_on_module_device(qconfig.weight)
return QConfig(activation, weight)
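# Illustrative sketch (not part of the original module): shows how
# add_module_to_qconfig_obs_ctr ties observer construction to a module's device.
# A CPU Linear module is used here; with a CUDA module the returned constructors
# would create observers on that CUDA device instead.
def _example_add_module_to_qconfig_obs_ctr():
    module = torch.nn.Linear(2, 2)  # parameters live on the CPU here
    device_aware_qconfig = add_module_to_qconfig_obs_ctr(get_default_qconfig('fbgemm'), module)
    # instantiating the activation observer now places it on the module's device
    return device_aware_qconfig.activation()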
_ObserverOrFakeQuantizeConstructor = Union[_PartialWrapper, ObserverBase, FakeQuantizeBase]
def obs_or_fq_ctr_equals(obs_or_fq1: _ObserverOrFakeQuantizeConstructor, obs_or_fq2: _ObserverOrFakeQuantizeConstructor):
if isinstance(obs_or_fq1, _PartialWrapper) and isinstance(obs_or_fq2, _PartialWrapper):
return _partial_wrapper_equals(obs_or_fq1, obs_or_fq2)
return obs_or_fq1 == obs_or_fq2
def _partial_wrapper_equals(obs_or_fq1: _PartialWrapper, obs_or_fq2: _PartialWrapper):
"""
    Return whether the two partial wrappers are equal.
"""
# functools.partial has no __eq__ operator defined so '==' defaults to 'is'
obs_or_fq1_keywords = copy.copy(obs_or_fq1.p.keywords)
obs_or_fq2_keywords = copy.copy(obs_or_fq2.p.keywords)
keywords_equal = True
# compare observer constructor with obs_or_fq_ctr_equals since direct compare would fail
if "observer" in obs_or_fq1_keywords and "observer" in obs_or_fq2_keywords:
keywords_equal = keywords_equal and obs_or_fq_ctr_equals(obs_or_fq1_keywords["observer"], obs_or_fq2_keywords["observer"])
obs_or_fq1_keywords.pop("observer")
obs_or_fq2_keywords.pop("observer")
keywords_equal = keywords_equal and obs_or_fq1_keywords == obs_or_fq2_keywords
return obs_or_fq1.p.func == obs_or_fq2.p.func and obs_or_fq1.p.args == obs_or_fq2.p.args and keywords_equal
def qconfig_equals(q1: QConfigAny, q2: QConfigAny):
"""
Returns `True` if `q1` equals `q2`, and `False` otherwise.
"""
if q1 is None or q2 is None:
return q1 == q2
else:
assert q1 is not None and q2 is not None
try:
# Qconfig weight and activation can be either a partial wrapper,
# or an observer class. Special handling is required (above) for
# comparing partial wrappers.
activation_same = obs_or_fq_ctr_equals(q1.activation, q2.activation)
weight_same = obs_or_fq_ctr_equals(q1.weight, q2.weight)
return activation_same and weight_same
except AttributeError:
return q1 == q2
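# Illustrative sketch (not part of the original module): qconfig_equals compares
# the underlying observer/fake-quant constructors, so two independently created
# default qconfigs for the same backend are expected to compare equal.
def _example_qconfig_equals():
    assert qconfig_equals(get_default_qconfig('fbgemm'), get_default_qconfig('fbgemm'))
    assert not qconfig_equals(get_default_qconfig('fbgemm'), None)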
def activation_is_memoryless(qconfig: QConfig):
"""
Return whether the observer for activations defined in the given QConfig is memoryless.
This means a MovingAverage observer with averaging constant equal to 1.
"""
def _is_memoryless(observer):
return hasattr(observer, "averaging_constant") and observer.averaging_constant == 1
act = qconfig.activation()
if isinstance(act, FakeQuantizeBase) and hasattr(act, "activation_post_process"):
return _is_memoryless(act.activation_post_process)
else:
return _is_memoryless(act)
def is_reuse_input_qconfig(qconfig: Optional[QConfig]):
return qconfig is not None and \
isinstance(qconfig.activation(), ReuseInputObserver) and \
isinstance(qconfig.weight(), NoopObserver)
|
pytorch-master
|
torch/ao/quantization/qconfig.py
|
# flake8: noqa: F403
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules # noqa: F403
from .fuse_modules import fuse_modules_qat # noqa: F403
from .fuser_method_mappings import * # noqa: F403
from .observer import * # noqa: F403
from .qconfig import * # noqa: F403
from .qconfig_mapping import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantization_mappings import * # noqa: F403
from .quantize import * # noqa: F403
from .quantize_jit import * # noqa: F403
from .stubs import * # noqa: F403
def default_eval_fn(model, calib_data):
r"""
Default evaluation function takes a torch.utils.data.Dataset or a list of
    input Tensors and runs the model on the dataset
"""
for data, target in calib_data:
model(data)
|
pytorch-master
|
torch/ao/quantization/__init__.py
|
from __future__ import annotations
from collections import OrderedDict
from typing import Any, Callable, Dict, Tuple, Union
import torch
from .fake_quantize import (
default_weight_fake_quant,
FixedQParamsFakeQuantize,
)
from .observer import (
_PartialWrapper,
default_fixed_qparams_range_0to1_observer,
default_fixed_qparams_range_neg1to1_observer,
default_weight_observer,
)
from .qconfig import (
default_reuse_input_qconfig,
get_default_qconfig,
get_default_qat_qconfig,
QConfig,
QConfigAny
)
__all__ = [
"get_default_qconfig_mapping",
"get_default_qat_qconfig_mapping",
"QConfigMapping",
]
# TODO: replace all usages with these constants
GLOBAL_DICT_KEY = ""
OBJECT_TYPE_DICT_KEY = "object_type"
MODULE_NAME_REGEX_DICT_KEY = "module_name_regex"
MODULE_NAME_DICT_KEY = "module_name"
MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY = "module_name_object_type_order"
_FIXED_QPARAMS_OP_TO_OBSERVER: Dict[Union[Callable, str], _PartialWrapper] = {
torch.nn.Hardsigmoid: default_fixed_qparams_range_0to1_observer,
torch.nn.functional.hardsigmoid: default_fixed_qparams_range_0to1_observer,
"hardsigmoid": default_fixed_qparams_range_0to1_observer,
"hardsigmoid_": default_fixed_qparams_range_0to1_observer,
torch.nn.Sigmoid: default_fixed_qparams_range_0to1_observer,
torch.sigmoid: default_fixed_qparams_range_0to1_observer,
"sigmoid": default_fixed_qparams_range_0to1_observer,
"sigmoid_": default_fixed_qparams_range_0to1_observer,
torch.nn.Softmax: default_fixed_qparams_range_0to1_observer,
torch.nn.Tanh: default_fixed_qparams_range_neg1to1_observer,
torch.tanh: default_fixed_qparams_range_neg1to1_observer,
"tanh": default_fixed_qparams_range_neg1to1_observer,
"tanh_": default_fixed_qparams_range_neg1to1_observer,
}
def _get_default_qconfig_mapping(is_qat: bool, backend: str, version: int) -> QConfigMapping:
"""
Return the default QConfigMapping for the given quantization type and backend.
"""
if is_qat:
qconfig = get_default_qat_qconfig(backend, version)
else:
qconfig = get_default_qconfig(backend, version)
default_weight = default_weight_fake_quant if is_qat else default_weight_observer
# default_per_channel_weight_observer is not currently compatible with fbgemm backend
# so we have to modify the weight observer to default_weight_observer or another
# per tensor supported observer.
# see https://github.com/pytorch/pytorch/issues/47535
if backend == "fbgemm":
qconfig_transpose = QConfig(activation=qconfig.activation, weight=default_weight)
else:
qconfig_transpose = qconfig
qconfig_mapping = QConfigMapping() \
.set_global(qconfig) \
.set_object_type("reshape", default_reuse_input_qconfig) \
.set_object_type(torch.nn.Conv1d, qconfig) \
.set_object_type(torch.nn.Conv2d, qconfig) \
.set_object_type(torch.nn.Conv3d, qconfig) \
.set_object_type(torch.nn.ConvTranspose1d, qconfig_transpose) \
.set_object_type(torch.nn.ConvTranspose2d, qconfig_transpose) \
.set_object_type(torch.nn.ConvTranspose3d, qconfig_transpose) \
.set_object_type(torch.nn.Linear, qconfig) \
.set_object_type(torch.nn.functional.conv1d, qconfig) \
.set_object_type(torch.nn.functional.conv2d, qconfig) \
.set_object_type(torch.nn.functional.conv3d, qconfig) \
.set_object_type(torch.nn.functional.conv_transpose1d, qconfig_transpose) \
.set_object_type(torch.nn.functional.conv_transpose2d, qconfig_transpose) \
.set_object_type(torch.nn.functional.conv_transpose3d, qconfig_transpose) \
.set_object_type(torch.nn.functional.linear, qconfig) \
.set_object_type(torch.nn.ReLU, qconfig) \
.set_object_type(torch.nn.functional.relu, qconfig) \
.set_object_type(torch.relu, qconfig) \
.set_object_type(torch.nn.BatchNorm1d, qconfig) \
.set_object_type(torch.nn.BatchNorm2d, qconfig) \
.set_object_type(torch.nn.BatchNorm3d, qconfig)
# Use special observers for ops with fixed qparams
fixed_qparams_observer_to_qconfig: Dict[Any, QConfigAny] = {}
for fixed_qparams_op, observer in _FIXED_QPARAMS_OP_TO_OBSERVER.items():
if observer in fixed_qparams_observer_to_qconfig:
fixed_qparams_qconfig = fixed_qparams_observer_to_qconfig[observer]
else:
if is_qat:
activation = FixedQParamsFakeQuantize.with_args(observer=observer)
else:
activation = observer
fixed_qparams_qconfig = QConfig(activation=activation, weight=default_weight)
fixed_qparams_observer_to_qconfig[observer] = fixed_qparams_qconfig
qconfig_mapping.set_object_type(fixed_qparams_op, fixed_qparams_qconfig)
return qconfig_mapping
def get_default_qconfig_mapping(backend="fbgemm", version=0) -> QConfigMapping:
"""
Return the default QConfigMapping for post training quantization.
"""
return _get_default_qconfig_mapping(False, backend, version)
def get_default_qat_qconfig_mapping(backend="fbgemm", version=1) -> QConfigMapping:
"""
Return the default QConfigMapping for quantization aware training.
"""
return _get_default_qconfig_mapping(True, backend, version)
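# Illustrative usage sketch (not part of the original module): typical use of the
# default mapping with FX graph mode quantization. `float_model` and
# `example_inputs` are placeholders supplied by the caller.
def _example_default_qconfig_mapping_usage(float_model, example_inputs):
    from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx
    qconfig_mapping = get_default_qconfig_mapping("fbgemm")
    prepared = prepare_fx(float_model.eval(), qconfig_mapping, example_inputs)
    # ... run calibration data through `prepared` here ...
    return convert_fx(prepared)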
class QConfigMapping:
"""
Mapping from model ops to :class:`torch.ao.quantization.QConfig`s.
The user can specify QConfigs using the following methods (in increasing match priority):
`set_global`: sets the global (default) QConfig
`set_object_type`: sets the QConfig for a given module type, function, or method name
`set_module_name_regex`: sets the QConfig for modules matching the given regex string
`set_module_name`: sets the QConfig for modules matching the given module name
`set_module_name_object_type_order`: sets the QConfig for modules matching a combination
of the given module name, object type, and the index at which the module appears
Example usage::
qconfig_mapping = QConfigMapping()
.set_global(global_qconfig)
.set_object_type(torch.nn.Linear, qconfig1)
.set_object_type(torch.nn.ReLU, qconfig1)
.set_module_name_regex("foo.*bar.*conv[0-9]+", qconfig1)
.set_module_name_regex("foo.*", qconfig2)
.set_module_name("module1", qconfig1)
.set_module_name("module2", qconfig2)
.set_module_name_object_type_order("foo.bar", torch.nn.functional.linear, 0, qconfig3)
"""
def __init__(self):
# In increasing match priority:
self.global_qconfig: QConfigAny = None
self.object_type_qconfigs: OrderedDict[Union[Callable, str], QConfigAny] = OrderedDict()
self.module_name_regex_qconfigs: OrderedDict[str, QConfigAny] = OrderedDict()
self.module_name_qconfigs: OrderedDict[str, QConfigAny] = OrderedDict()
self.module_name_object_type_order_qconfigs: OrderedDict[Tuple[str, Callable, int], QConfigAny] =\
OrderedDict()
def set_global(self, global_qconfig: QConfigAny) -> QConfigMapping:
"""
Set the global (default) QConfig.
"""
self.global_qconfig = global_qconfig
return self
def set_object_type(self, object_type: Union[Callable, str], qconfig: QConfigAny) -> QConfigMapping:
"""
Set the QConfig for a given module type, function, or method name.
If the QConfig for an existing object type was already set, the new QConfig will override the old one.
"""
self.object_type_qconfigs[object_type] = qconfig
return self
def set_module_name_regex(self, module_name_regex: str, qconfig: QConfigAny) -> QConfigMapping:
"""
Set the QConfig for modules matching the given regex string.
Regexes will be matched in the order in which they are registered through this method.
Thus, the caller should register more specific patterns first, e.g.::
qconfig_mapping = QConfigMapping()
.set_module_name_regex("foo.*bar.*conv[0-9]+", qconfig1)
.set_module_name_regex("foo.*bar.*", qconfig2)
.set_module_name_regex("foo.*", qconfig3)
In this example, "foo.bar.conv0" would match qconfig1, "foo.bar.linear" would match qconfig2,
and "foo.baz.relu" would match qconfig3.
If the QConfig for an existing module name regex was already set, the new QConfig will override the
old one while preserving the order in which the regexes were originally registered.
"""
self.module_name_regex_qconfigs[module_name_regex] = qconfig
return self
def set_module_name(self, module_name: str, qconfig: QConfigAny) -> QConfigMapping:
"""
Set the QConfig for modules matching the given module name.
If the QConfig for an existing module name was already set, the new QConfig will override the old one.
"""
self.module_name_qconfigs[module_name] = qconfig
return self
def set_module_name_object_type_order(
self,
module_name: str,
object_type: Callable,
index: int,
qconfig: QConfigAny) -> QConfigMapping:
"""
Set the QConfig for modules matching a combination of the given module name, object type,
and the index at which the module appears.
If the QConfig for an existing (module name, object type, index) was already set, the new QConfig
will override the old one.
"""
self.module_name_object_type_order_qconfigs[(module_name, object_type, index)] = qconfig
return self
# TODO: remove this
def to_dict(self) -> Dict[str, Any]:
"""
Convert this `QConfigMapping` to a dictionary with the following keys:
"" (for global QConfig)
"object_type"
"module_name_regex"
"module_name"
"module_name_object_type_order"
The values of this dictionary are lists of tuples.
"""
return {
GLOBAL_DICT_KEY: self.global_qconfig,
OBJECT_TYPE_DICT_KEY: list(self.object_type_qconfigs.items()),
MODULE_NAME_REGEX_DICT_KEY: list(self.module_name_regex_qconfigs.items()),
MODULE_NAME_DICT_KEY: list(self.module_name_qconfigs.items()),
MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY: [
(*k, v) for k, v in self.module_name_object_type_order_qconfigs.items()
],
}
# TODO: remove this
@classmethod
def from_dict(cls, qconfig_dict: Dict[str, Any]) -> QConfigMapping:
"""
Create a `QConfigMapping` from a dictionary with the following keys (all optional):
"" (for global QConfig)
"object_type"
"module_name_regex"
"module_name"
"module_name_object_type_order"
The values of this dictionary are expected to be lists of tuples.
"""
conf = cls()
if GLOBAL_DICT_KEY in qconfig_dict:
conf.set_global(qconfig_dict[GLOBAL_DICT_KEY])
for object_type, qconfig in qconfig_dict.get(OBJECT_TYPE_DICT_KEY, []):
conf.set_object_type(object_type, qconfig)
for module_name_regex, qconfig in qconfig_dict.get(MODULE_NAME_REGEX_DICT_KEY, []):
conf.set_module_name_regex(module_name_regex, qconfig)
for module_name, qconfig in qconfig_dict.get(MODULE_NAME_DICT_KEY, []):
conf.set_module_name(module_name, qconfig)
for module_name, object_type, index, qconfig in qconfig_dict.get(MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY, []):
conf.set_module_name_object_type_order(module_name, object_type, index, qconfig)
return conf
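# Illustrative sketch (not part of the original module): building a
# QConfigMapping and round-tripping it through the dict representation.
# `global_qconfig` and `linear_qconfig` are placeholder QConfig objects.
def _example_qconfig_mapping_round_trip(global_qconfig, linear_qconfig):
    mapping = (QConfigMapping()
               .set_global(global_qconfig)
               .set_object_type(torch.nn.Linear, linear_qconfig)
               .set_module_name("sub.linear2", None))  # None means: do not quantize this module
    as_dict = mapping.to_dict()
    return QConfigMapping.from_dict(as_dict)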
|
pytorch-master
|
torch/ao/quantization/qconfig_mapping.py
|
from torch import nn
class QuantStub(nn.Module):
r"""Quantize stub module, before calibration, this is same as an observer,
it will be swapped as `nnq.Quantize` in `convert`.
Args:
qconfig: quantization configuration for the tensor,
if qconfig is not provided, we will get qconfig from parent modules
"""
def __init__(self, qconfig=None):
super(QuantStub, self).__init__()
if qconfig:
self.qconfig = qconfig
def forward(self, x):
return x
class DeQuantStub(nn.Module):
r"""Dequantize stub module, before calibration, this is same as identity,
this will be swapped as `nnq.DeQuantize` in `convert`.
Args:
qconfig: quantization configuration for the tensor,
if qconfig is not provided, we will get qconfig from parent modules
"""
def __init__(self, qconfig=None):
super(DeQuantStub, self).__init__()
if qconfig:
self.qconfig = qconfig
def forward(self, x):
return x
class QuantWrapper(nn.Module):
r"""A wrapper class that wraps the input module, adds QuantStub and
DeQuantStub and surround the call to module with call to quant and dequant
modules.
This is used by the `quantization` utility functions to add the quant and
dequant modules, before `convert` function `QuantStub` will just be observer,
it observes the input tensor, after `convert`, `QuantStub`
will be swapped to `nnq.Quantize` which does actual quantization. Similarly
for `DeQuantStub`.
"""
quant: QuantStub
dequant: DeQuantStub
module: nn.Module
def __init__(self, module):
super(QuantWrapper, self).__init__()
qconfig = module.qconfig if hasattr(module, 'qconfig') else None
self.add_module('quant', QuantStub(qconfig))
self.add_module('dequant', DeQuantStub(qconfig))
self.add_module('module', module)
self.train(module.training)
def forward(self, X):
X = self.quant(X)
X = self.module(X)
return self.dequant(X)
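# Illustrative sketch (not part of the original module): wrapping a float module
# so that quant/dequant stubs surround its forward call. The default fbgemm
# qconfig is used here only as an example.
def _example_quant_wrapper():
    from torch.ao.quantization import get_default_qconfig
    float_module = nn.Linear(4, 4)
    float_module.qconfig = get_default_qconfig('fbgemm')
    # after prepare/convert, `quant` and `dequant` become real quantize/dequantize modules
    return QuantWrapper(float_module)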
|
pytorch-master
|
torch/ao/quantization/stubs.py
|
"""
Utils shared by different modes of quantization (eager/graph)
"""
import warnings
import functools
import torch
from torch.ao.quantization.quant_type import QuantType
from typing import Tuple, Any, Union, Callable, Dict, Optional
from torch.nn.utils.parametrize import is_parametrized
from collections import OrderedDict
from inspect import signature
from inspect import getfullargspec
# Type for fusion patterns, it can be more complicated than the following actually,
# see pattern.md for docs
# TODO: not sure if typing supports recursive data types
Pattern = Union[Callable, Tuple[Callable, Callable], Tuple[Callable, Tuple[Callable, Callable]], Any]
Pattern.__module__ = "torch.ao.quantization.utils"
# TODO: maybe rename this to MatchInputNode
class MatchAllNode:
""" A node pattern that matches all nodes, used in defining
fusion patterns in FX Graph Mode Quantization
"""
pass
module_type_list = {
torch.nn.ReLU,
torch.nn.ReLU6,
torch.nn.AdaptiveAvgPool1d,
torch.nn.AdaptiveAvgPool2d,
torch.nn.AdaptiveAvgPool3d,
torch.nn.AvgPool1d,
torch.nn.AvgPool2d,
torch.nn.AvgPool3d,
torch.nn.MaxPool1d,
torch.nn.MaxPool2d,
torch.nn.MaxPool3d,
torch.nn.Identity,
torch.nn.Hardsigmoid,
torch.nn.Sigmoid,
torch.nn.Tanh,
}
func_list = {
torch.nn.functional.adaptive_avg_pool1d,
torch.nn.functional.adaptive_avg_pool2d,
torch.nn.functional.adaptive_avg_pool3d,
torch.nn.functional.elu,
torch.nn.functional.hardswish,
torch.nn.functional.instance_norm,
torch.nn.functional.layer_norm,
torch.nn.functional.leaky_relu,
torch.nn.functional.silu,
torch.nn.functional.mish,
torch.nn.functional.dropout,
torch.nn.functional.max_pool1d,
torch.nn.functional.max_pool2d,
torch.nn.functional.max_pool3d,
torch.nn.functional.relu,
torch.nn.functional.hardtanh,
torch.nn.functional.hardtanh_,
torch.nn.functional.hardsigmoid,
torch.nn.functional.sigmoid,
torch.transpose,
torch.repeat_interleave,
torch.sigmoid,
torch.squeeze,
torch.stack,
torch.sum,
torch.tanh,
torch.unsqueeze,
torch.cat,
}
method_list = {
torch.mean,
'relu',
'relu_',
'contiguous',
'detach',
'detach_',
'hardsigmoid',
'hardsigmoid_',
'permute',
'repeat',
'repeat_interleave',
'reshape',
'resize_',
'shape',
'sigmoid',
'sigmoid_',
'size',
'squeeze',
'squeeze_',
'tanh',
'tanh_',
'transpose',
'unsqueeze',
'unsqueeze_',
'view',
}
# TODO: not used now, remove
def check_node(node, modules):
# TODO: reuse is_fixed_qparam_node after we move this function to _lower_to_native_backend.py
is_call_function = node.op == "call_function" and node.target in func_list
is_call_method = node.op == "call_method" and node.target in method_list
is_call_module = node.op == "call_module" and type(modules[str(node.target)]) in module_type_list
return is_call_function, is_call_method, is_call_module
def get_combined_dict(default_dict, additional_dict):
d = default_dict.copy()
d.update(additional_dict)
return d
def is_per_tensor(qscheme):
return qscheme == torch.per_tensor_affine or \
qscheme == torch.per_tensor_symmetric
def is_per_channel(qscheme):
return qscheme in [torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
torch.per_channel_symmetric]
def getattr_from_fqn(obj: Any, fqn: str) -> Any:
"""
    Given an obj and a fqn such as "foo.bar.baz", returns obj.foo.bar.baz.
"""
return functools.reduce(getattr, fqn.split("."), obj)
def get_qparam_dict(observer_or_fake_quant):
qscheme = observer_or_fake_quant.qscheme if hasattr(observer_or_fake_quant, "qscheme") else None
dtype = observer_or_fake_quant.dtype
qparams = {"qscheme": qscheme, "dtype": dtype}
if not qscheme:
return qparams
if is_per_tensor(qscheme):
qscheme = torch.per_tensor_affine
elif is_per_channel(qscheme):
# change symmetric to affine since we do not have symmetric
# quantized Tensor
if qscheme == torch.per_channel_symmetric:
qscheme = torch.per_channel_affine
qparams["axis"] = observer_or_fake_quant.ch_axis
else:
raise RuntimeError(f"Unrecognized qscheme: {qscheme}")
# update qscheme, since we don't have symmetric quant qscheme
# in quantized Tensor
qparams["qscheme"] = qscheme
scale, zero_point = observer_or_fake_quant.calculate_qparams()
qparams["scale"] = scale
qparams["zero_point"] = zero_point
return qparams
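# Illustrative sketch (not part of the original module): extracting quantization
# parameters from an observer after it has seen some data. MinMaxObserver is
# used here purely as an example observer.
def _example_get_qparam_dict():
    from torch.ao.quantization.observer import MinMaxObserver
    observer = MinMaxObserver(dtype=torch.quint8)
    observer(torch.randn(8, 8))          # record min/max statistics
    # returns a dict with "qscheme", "dtype", "scale" and "zero_point"
    return get_qparam_dict(observer)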
def get_swapped_custom_module_class(custom_module, custom_module_class_mapping, qconfig):
""" Get the observed/quantized custom module class that we need
to swap `custom_module` to
Input:
custom_module: input, can be an instance of either a float or observed custom module
custom_module_class_mapping: the float to observed or observed to quantized custom module class mapping
qconfig: qconfig configured for the custom module
Output:
corresponding observed/quantized custom module class for input custom module instance
"""
quant_type = get_quant_type(qconfig)
class_mapping = custom_module_class_mapping.get(quant_type, {})
assert type(custom_module) in class_mapping, "did not find corresponding observed " \
"module class for {} in mapping: {}".format(type(custom_module), class_mapping)
return class_mapping[type(custom_module)]
def activation_dtype(qconfig):
assert qconfig is not None
activation = qconfig.activation()
return activation.dtype
def weight_dtype(qconfig):
assert qconfig is not None
weight = qconfig.weight()
return weight.dtype
def activation_is_statically_quantized(qconfig):
""" Given a qconfig, decide if the activation needs to be
quantized or not, this includes quantizing to quint8, qint8 and float16
"""
return activation_dtype(qconfig) in [torch.quint8, torch.qint8, torch.float16]
def activation_is_dynamically_quantized(qconfig):
""" Given a qconfig, decide if the activation needs to be
dynamically quantized or not, this includes dynamically quantizing to
quint8, qint8 and float16
"""
activation_dtype, _, activation_compute_dtype = \
get_qconfig_dtypes(qconfig)
return activation_dtype == torch.float and \
activation_compute_dtype in [torch.quint8, torch.qint8, torch.float16]
def activation_is_int8_quantized(qconfig):
""" Given a qconfig, decide if the activation needs to be
quantized to int8 or not, this includes quantizing to quint8, qint8
"""
return activation_dtype(qconfig) in [torch.quint8, torch.qint8]
def activation_is_int32_quantized(qconfig):
""" Given a qconfig, decide if the activation needs to be
quantized to int32 or not
"""
return activation_dtype(qconfig) == torch.qint32
def weight_is_quantized(qconfig):
""" Given a qconfig, decide if the weight needs to be
quantized or not
"""
return weight_dtype(qconfig) in [torch.quint8, torch.qint8, torch.float16, torch.quint4x2]
def weight_is_statically_quantized(qconfig):
""" Given a qconfig, decide if the weight needs to be statically
quantized or not
"""
return weight_dtype(qconfig) in [torch.quint8, torch.qint8]
def op_is_int8_dynamically_quantized(qconfig) -> bool:
""" Given a qconfig, returns True if this op is using int8 dynamic
quantization
"""
activation_dtype, weight_dtype, activation_compute_dtype = \
get_qconfig_dtypes(qconfig)
return (
activation_dtype is torch.float and
# for now, the lines below assume fbgemm or qnnpack
weight_dtype is torch.qint8 and
activation_compute_dtype is torch.quint8
)
def get_qconfig_dtypes(qconfig):
r""" returns the qconfig tuple for qconfig:
(activation_dtype, weight_dtype, activation_compute_dtype)
"""
assert qconfig is not None
activation = qconfig.activation()
weight = qconfig.weight()
compute_dtype = activation.compute_dtype if hasattr(activation, 'compute_dtype') else None
return (activation.dtype, weight.dtype, compute_dtype)
def get_quant_type(qconfig):
assert qconfig is not None
activation = qconfig.activation()
weight = qconfig.weight()
static_dtypes = [torch.quint8, torch.qint8, torch.quint4x2]
if weight.dtype in static_dtypes:
if activation.dtype in static_dtypes:
return QuantType.STATIC
elif hasattr(activation, 'compute_dtype') and activation.compute_dtype in static_dtypes:
return QuantType.DYNAMIC
else:
return QuantType.WEIGHT_ONLY
if weight.dtype == torch.float16:
if activation.dtype == torch.float:
return QuantType.DYNAMIC
elif activation.dtype == torch.float16:
return QuantType.STATIC
raise Exception("Unrecognized dtype combination in get_quant_type: activation({}),"
"weight({})".format(activation.dtype, weight.dtype))
def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool:
""" Checks if the given minimum and maximum values are valid, meaning that
they exist and the min value is less than the max value.
"""
if min_val.numel() == 0 or max_val.numel() == 0:
warnings.warn(
"must run observer before calling calculate_qparams. " +
"Returning default values."
)
return False
if min_val.dim() == 0 or max_val.dim() == 0:
if min_val == float("inf") and max_val == float("-inf"):
warnings.warn(
"must run observer before calling calculate_qparams. " +
"Returning default values."
)
return False
assert min_val <= max_val, "min {} should be less than max {}".format(
min_val, max_val
)
else:
assert torch.all(
min_val <= max_val
), "min {} should be less than max {}".format(min_val, max_val)
return True
def calculate_qmin_qmax(quant_min: int, quant_max: int, has_customized_qrange: bool, dtype: torch.dtype,
reduce_range: bool) -> Tuple[int, int]:
r"""Calculates actual qmin and qmax based on the quantization range,
observer datatype and if range is reduced.
"""
# TODO(jerryzh): Figure out why custom quant_min/quant_max are still adjusted.
if has_customized_qrange:
        # This initialization is here to resolve TorchScript compilation issues and to allow
        # using refinement to decouple initial_quant_min and initial_quant_max from the quantization range.
        # The actual values of initial_quant_min and initial_quant_max will be reset below.
if dtype == torch.qint32:
initial_quant_min, initial_quant_max = 0, 2**31 - 1
else:
initial_quant_min, initial_quant_max = 0, 255
# The following assignment of self.qmin and self.qmax to the local variables and the if check refine the
# attribute from Optional valid integers for use, based on TorchScript's requirements.
custom_quant_min, custom_quant_max = quant_min, quant_max
if custom_quant_min is not None and custom_quant_max is not None:
initial_quant_min, initial_quant_max = (
custom_quant_min,
custom_quant_max,
)
qrange_len = initial_quant_max - initial_quant_min + 1
if dtype == torch.qint8:
assert (
0 < qrange_len <= 256
), "quantization range should be positive and not exceed the maximum bit range (=256)."
elif dtype == torch.qint32:
assert (
0 < qrange_len <= 2**31
), "quantization range should be positive and not exceed the maximum bit range (=4294967296)."
if reduce_range:
quant_min, quant_max = quant_min // 2, quant_max // 2
else:
# Fallback onto default 8-bit qmin and qmax calculation if dynamic range is not used.
if dtype == torch.qint8:
if reduce_range:
quant_min, quant_max = -64, 63
else:
quant_min, quant_max = -128, 127
elif dtype == torch.quint8:
if reduce_range:
quant_min, quant_max = 0, 127
else:
quant_min, quant_max = 0, 255
elif dtype == torch.qint32:
quant_min, quant_max = -1 * (2 ** 31), (2 ** 31) - 1
else:
quant_min, quant_max = 0, 15
return quant_min, quant_max
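# Illustrative sketch (not part of the original module): the default 8-bit ranges
# produced by calculate_qmin_qmax when no customized quantization range is used.
def _example_calculate_qmin_qmax():
    assert calculate_qmin_qmax(0, 255, False, torch.quint8, False) == (0, 255)
    assert calculate_qmin_qmax(0, 255, False, torch.quint8, True) == (0, 127)
    assert calculate_qmin_qmax(-128, 127, False, torch.qint8, False) == (-128, 127)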
def _parent_name(target):
"""
    Turn 'foo.bar' into ('foo', 'bar')
"""
r = target.rsplit('.', 1)
if len(r) == 1:
return '', r[0]
else:
return r[0], r[1]
def has_no_children_ignoring_parametrizations(module):
"""
Checks if module._modules is empty or
if module is a parametrization, checks that module._modules only has
the 'parametrizations' module
"""
if len(module._modules) == 0:
return True
elif is_parametrized(module):
return len(module._modules) == 1 and 'parametrizations' in module._modules
else:
return False
def _get_path_of_module(root: torch.nn.Module, submodule: torch.nn.Module) -> Optional[str]:
""" Get the path (fully qualified name) of a submodule
Example::
>> class M(torch.nn.Module):
def __init__(self):
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x)
>> m = M()
>> l = m.linear
>> _get_path_of_module(m, l)
"linear"
"""
for n, p in root.named_modules():
if submodule is p:
return n
return None
def _get_signature_locals(f: Callable, loc: Dict[str, Any]) -> Dict[str, Any]:
""" Get local keyword arguments
Example::
>> def f(self, a, b=9):
pass
>> loc = {"a": 6, "c": 7}
>> _get_signature_locals(f, loc)
{"a": 6}
"""
return {k: v for k, v in loc.items() if k in signature(f).parameters}
def _get_default_kwargs(f: Callable) -> "OrderedDict[str, Any]":
""" Get all default keyword arguments from function signature
Example::
>> def f(self, a, b=9):
pass
>> _get_default_kwargs(f)
{"b": 9}
"""
kwargs = {}
for name, param in signature(f).parameters.items():
if param.default is not param.empty:
kwargs[name] = param.default
elif param.kind is param.VAR_POSITIONAL:
kwargs[name] = ()
elif param.kind is param.VAR_KEYWORD:
kwargs[name] = {}
return OrderedDict(kwargs)
def _normalize_kwargs(func: Callable, loc: Dict[str, Any]) -> "OrderedDict[str, Any]":
""" Given a function and local function arguments, normalize the keyword
arguments by filling in default arguments from function signature
Example::
>> def f(self, key1=3, key2=3):
pass
>> loc = {"key2": 6}
>> _normalize_kwargs(f, loc)
{"key1": 3, "key2": 6}
"""
default_kwargs = _get_default_kwargs(func)
local_kwargs = _get_signature_locals(func, loc)
normalized_kwargs = default_kwargs.copy()
for attr, val in local_kwargs.items():
if attr in normalized_kwargs:
# override the default keyword arguments
normalized_kwargs[attr] = val
return normalized_kwargs
def _get_num_pos_args(f: Callable) -> int:
""" Get number of positional args for a function
Example::
>> def f(self, key1=3, key2=3):
pass
>> _get_num_pos_args(f)
3
"""
return len(getfullargspec(f).args)
def get_fqn_to_example_inputs(
model: torch.nn.Module,
example_inputs: Tuple[Any, ...]
) -> Dict[str, Tuple[Any, ...]]:
""" Given a model and its example inputs, return a dictionary from
fully qualified name of submodules to example_inputs for that submodule,
e.g. {"linear1": (tensor1,), "linear2": (tensor2,), "sub": (tensor3,),
"sub.linear1": (tensor4,), ...}
    Used to make quantizing submodules easier now that FX Graph Mode Quantization requires
example inputs.
Also works for keyword arguments with default values, we would flatten keyword
arguments as positional arguments and fill in the missing keyword args with default
values, e.g. if we have a forward function:
def forward(self, x, key1=3, key2=3):
...
and we call it with self.submodule(x, key2=6)
we'll get example_inputs: (x, 3, 6)
user can also override `key1` with positional arguments as well:
for self.submodule(x, 5, key2=6)
we'll get: (x, 5, 6)
    Variable positional arguments and variable keyword arguments in the forward
    function are not currently supported, so please make sure no submodule is
    using them.
"""
root = model
fqn_to_example_inputs = {}
def _patched_module_call(self, *args, **kwargs):
submodule_example_inputs = list(args).copy()
normalized_kwargs = _normalize_kwargs(self.forward, kwargs)
        # minus 1 to skip counting `self`
num_args = _get_num_pos_args(self.forward) - 1
num_to_pop = num_args - len(submodule_example_inputs)
while num_to_pop and normalized_kwargs:
normalized_kwargs.popitem(last=False)
num_to_pop -= 1
submodule_example_inputs.extend(normalized_kwargs.values())
submodule_example_inputs_tuple = tuple(submodule_example_inputs)
fqn = _get_path_of_module(root, self)
if fqn is not None:
fqn_to_example_inputs[fqn] = submodule_example_inputs_tuple
return orig_module_call(self, *args, **kwargs)
orig_module_call = torch.nn.Module.__call__
torch.nn.Module.__call__ = _patched_module_call
try:
model(*example_inputs)
finally:
# restore the module call even if there is an exception
torch.nn.Module.__call__ = orig_module_call
return fqn_to_example_inputs
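# Illustrative sketch (not part of the original module): collecting per-submodule
# example inputs for a tiny model. `_ExampleParent` is a hypothetical module used
# only for demonstration.
def _example_get_fqn_to_example_inputs():
    class _ExampleParent(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear1 = torch.nn.Linear(5, 5)
            self.linear2 = torch.nn.Linear(5, 5)

        def forward(self, x):
            return self.linear2(self.linear1(x))

    model = _ExampleParent()
    # maps e.g. "linear1" and "linear2" to the positional args they were called with
    return get_fqn_to_example_inputs(model, (torch.randn(1, 5),))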
__all__ = [
"Pattern",
"MatchAllNode",
"check_node",
"get_combined_dict",
"is_per_tensor",
"is_per_channel",
"getattr_from_fqn",
"get_qparam_dict",
"get_swapped_custom_module_class",
"activation_dtype",
"weight_dtype",
"activation_is_statically_quantized",
"activation_is_dynamically_quantized",
"activation_is_int8_quantized",
"activation_is_int32_quantized",
"weight_is_quantized",
"weight_is_statically_quantized",
"op_is_int8_dynamically_quantized",
"get_qconfig_dtypes",
"get_quant_type",
"check_min_max_valid",
"calculate_qmin_qmax",
"has_no_children_ignoring_parametrizations",
"get_fqn_to_example_inputs",
]
|
pytorch-master
|
torch/ao/quantization/utils.py
|
import torch.nn as nn
import torch.nn.intrinsic as nni
from typing import Union, Callable, Tuple, Dict, Optional, Type
from torch.ao.quantization.utils import Pattern
from torch.ao.quantization.utils import get_combined_dict
from torch.ao.quantization.utils import MatchAllNode
import itertools
def fuse_conv_bn(is_qat, conv, bn):
r"""Given the conv and bn modules, fuses them and returns the fused module
Args:
is_qat: a flag for whether we are using quantization aware training fusion
or post training quantization fusion
        conv: Module instance of type conv1d/conv2d/conv3d
bn: Spatial BN instance that needs to be fused with the conv
Examples::
>>> m1 = nn.Conv2d(10, 20, 3)
>>> b1 = nn.BatchNorm2d(20)
>>> # xdoctest: +SKIP
        >>> m2 = fuse_conv_bn(False, m1, b1)
"""
assert(conv.training == bn.training),\
"Conv and BN both must be in the same mode (train or eval)."
fused_module_class_map = {
nn.Conv1d: nni.ConvBn1d,
nn.Conv2d: nni.ConvBn2d,
nn.Conv3d: nni.ConvBn3d,
}
if is_qat:
assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
fused_module_class = fused_module_class_map.get((type(conv)), None)
if fused_module_class is not None:
return fused_module_class(conv, bn)
else:
raise NotImplementedError("Cannot fuse train modules: {}".format((conv, bn)))
else:
return nn.utils.fuse_conv_bn_eval(conv, bn)
def fuse_conv_bn_relu(is_qat, conv, bn, relu):
r"""Given the conv and bn modules, fuses them and returns the fused module
Args:
is_qat: a flag for whether we are using quantization aware training fusion
or post training quantization fusion
        conv: Module instance of type conv1d/conv2d/conv3d
        bn: Spatial BN instance that needs to be fused with the conv
        relu: ReLU instance that needs to be fused with the conv and bn
Examples::
>>> m1 = nn.Conv2d(10, 20, 3)
>>> b1 = nn.BatchNorm2d(20)
>>> r1 = nn.ReLU(inplace=False)
>>> # xdoctest: +SKIP
        >>> m2 = fuse_conv_bn_relu(False, m1, b1, r1)
"""
    assert(conv.training == bn.training == relu.training),\
        "Conv, BN and ReLU must all be in the same mode (train or eval)."
fused_module : Optional[Type[nn.Sequential]] = None
if is_qat:
map_to_fused_module_train = {
nn.Conv1d: nni.ConvBnReLU1d,
nn.Conv2d: nni.ConvBnReLU2d,
nn.Conv3d: nni.ConvBnReLU3d,
}
assert bn.num_features == conv.out_channels, 'Output channel of Conv must match num_features of BatchNorm'
assert bn.affine, 'Only support fusing BatchNorm with affine set to True'
assert bn.track_running_stats, 'Only support fusing BatchNorm with tracking_running_stats set to True'
fused_module = map_to_fused_module_train.get(type(conv), None)
if fused_module is not None:
return fused_module(conv, bn, relu)
else:
raise NotImplementedError("Cannot fuse train modules: {}".format((conv, bn, relu)))
else:
map_to_fused_module_eval = {
nn.Conv1d: nni.ConvReLU1d,
nn.Conv2d: nni.ConvReLU2d,
nn.Conv3d: nni.ConvReLU3d,
}
fused_module = map_to_fused_module_eval.get(type(conv), None)
if fused_module is not None:
fused_conv = nn.utils.fusion.fuse_conv_bn_eval(conv, bn)
return fused_module(fused_conv, relu)
else:
raise NotImplementedError("Cannot fuse eval modules: {}".format((conv, bn, relu)))
def fuse_linear_bn(is_qat, linear, bn):
r"""Given the linear and bn modules, fuses them and returns the fused module
Args:
is_qat: a flag for whether we are using quantization aware training fusion
or post training quantization fusion
linear: Module instance of type Linear
bn: BatchNorm1d instance that needs to be fused with the linear layer
Examples::
>>> m1 = nn.Linear(20, 10)
>>> b1 = nn.BatchNorm1d(10)
>>> # xdoctest: +SKIP
        >>> m2 = fuse_linear_bn(False, m1, b1)
"""
assert(linear.training == bn.training),\
"Linear and BN both must be in the same mode (train or eval)."
if is_qat:
assert bn.num_features == linear.out_features,\
"Output features of Linear must match num_features of BatchNorm1d"
assert bn.affine, "Only support fusing BatchNorm1d with affine set to True"
assert bn.track_running_stats,\
"Only support fusing BatchNorm1d with tracking_running_stats set to True"
return nni.LinearBn1d(linear, bn)
else:
return nn.utils.fusion.fuse_linear_bn_eval(linear, bn)
def fuse_convtranspose_bn(is_qat, convt, bn):
r"""Given ConvTranspose and bn modules, fuses them and returns the fused module
Args:
convt: Module instance of type ConvTransposeNd
        bn: BatchNormNd instance that needs to be fused with the ConvTranspose module;
            the BatchNorm N should match the ConvTranspose N
Examples::
>>> m1 = nn.ConvTranspose2d(10, 20, 3)
>>> b1 = nn.BatchNorm2d(20)
>>> # xdoctest: +SKIP
        >>> m2 = fuse_convtranspose_bn(False, m1, b1)
"""
assert(convt.training == bn.training),\
"ConvTranspose and BN both must be in the same mode (train or eval)."
if is_qat:
raise Exception("Fusing ConvTranspose+BatchNorm not yet supported in QAT.")
else:
return nn.utils.fusion.fuse_conv_bn_eval(convt, bn, transpose=True)
def sequential_wrapper2(sequential):
""" Given a sequential class for two modules, return a function that takes
is_qat, and then two modules as argument, that ignores the is_qat flag
and always returns the sequential that combines the two input modules
"""
def fuser_method(is_qat, m1, m2):
return sequential(m1, m2)
return fuser_method
DEFAULT_OP_LIST_TO_FUSER_METHOD: Dict[Tuple, Union[nn.Sequential, Callable]] = {
(nn.Conv1d, nn.BatchNorm1d): fuse_conv_bn,
(nn.Conv1d, nn.BatchNorm1d, nn.ReLU): fuse_conv_bn_relu,
(nn.Conv2d, nn.BatchNorm2d): fuse_conv_bn,
(nn.Conv2d, nn.BatchNorm2d, nn.ReLU): fuse_conv_bn_relu,
(nn.Conv3d, nn.BatchNorm3d): fuse_conv_bn,
(nn.Conv3d, nn.BatchNorm3d, nn.ReLU): fuse_conv_bn_relu,
(nn.Conv1d, nn.ReLU): sequential_wrapper2(nni.ConvReLU1d),
(nn.Conv2d, nn.ReLU): sequential_wrapper2(nni.ConvReLU2d),
(nn.Conv3d, nn.ReLU): sequential_wrapper2(nni.ConvReLU3d),
(nn.Linear, nn.BatchNorm1d): fuse_linear_bn,
(nn.Linear, nn.ReLU): sequential_wrapper2(nni.LinearReLU),
(nn.BatchNorm2d, nn.ReLU): sequential_wrapper2(nni.BNReLU2d),
(nn.BatchNorm3d, nn.ReLU): sequential_wrapper2(nni.BNReLU3d),
(nn.ConvTranspose1d, nn.BatchNorm1d): fuse_convtranspose_bn,
(nn.ConvTranspose2d, nn.BatchNorm2d): fuse_convtranspose_bn,
(nn.ConvTranspose3d, nn.BatchNorm3d): fuse_convtranspose_bn,
}
def get_fuser_method(op_list, additional_fuser_method_mapping=None):
''' Get fuser method for the given list of module types,
return None if fuser method does not exist
'''
if additional_fuser_method_mapping is None:
additional_fuser_method_mapping = dict()
all_mappings = get_combined_dict(DEFAULT_OP_LIST_TO_FUSER_METHOD,
additional_fuser_method_mapping)
fuser_method = all_mappings.get(op_list, None)
assert fuser_method is not None, "did not find fuser method for: {} ".format(op_list)
return fuser_method
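# Illustrative sketch (not part of the original module): looking up the fuser
# method for a (Conv2d, BatchNorm2d, ReLU) sequence and applying it in
# post-training (is_qat=False) mode.
def _example_get_fuser_method():
    conv = nn.Conv2d(3, 8, 3).eval()
    bn = nn.BatchNorm2d(8).eval()
    relu = nn.ReLU().eval()
    fuser = get_fuser_method((nn.Conv2d, nn.BatchNorm2d, nn.ReLU))  # -> fuse_conv_bn_relu
    # is_qat=False takes the eval-time path: BN is folded and a ConvReLU2d is returned
    return fuser(False, conv, bn, relu)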
def reverse_sequential_wrapper2(sequential):
""" Given a sequential class for two modules, return a function that takes
    is_qat and then two modules as arguments, ignores the is_qat flag,
    and always returns the sequential that combines the two input modules, with
    the order of the two inputs reversed
"""
def fuser_method(is_qat, m1, m2):
return sequential(m2, m1)
return fuser_method
def reverse2(f):
def reversed(is_qat, x, y):
return f(is_qat, y, x)
return reversed
def reverse3(f):
def reversed(is_qat, x, w):
y, z = w
return f(is_qat, z, y, x)
return reversed
DEFAULT_PATTERN_TO_FUSER_METHOD: Dict[Pattern, Union[nn.Sequential, Callable]] = {
(nn.BatchNorm1d, nn.Conv1d): reverse2(fuse_conv_bn),
(nn.ReLU, (nn.BatchNorm1d, nn.Conv1d)): reverse3(fuse_conv_bn_relu),
(nn.BatchNorm2d, nn.Conv2d): reverse2(fuse_conv_bn),
(nn.ReLU, (nn.BatchNorm2d, nn.Conv2d)): reverse3(fuse_conv_bn_relu),
(nn.BatchNorm3d, nn.Conv3d): reverse2(fuse_conv_bn),
(nn.ReLU, (nn.BatchNorm3d, nn.Conv3d)): reverse3(fuse_conv_bn_relu),
(nn.ReLU, nn.Conv1d): reverse_sequential_wrapper2(nni.ConvReLU1d),
(nn.ReLU, nn.Conv2d): reverse_sequential_wrapper2(nni.ConvReLU2d),
(nn.ReLU, nn.Conv3d): reverse_sequential_wrapper2(nni.ConvReLU3d),
(nn.BatchNorm1d, nn.Linear): reverse2(fuse_linear_bn),
(nn.ReLU, nn.Linear): reverse_sequential_wrapper2(nni.LinearReLU),
(nn.ReLU, nn.BatchNorm2d): reverse_sequential_wrapper2(nni.BNReLU2d),
(nn.ReLU, nn.BatchNorm3d): reverse_sequential_wrapper2(nni.BNReLU3d),
(nn.BatchNorm1d, nn.ConvTranspose1d): reverse2(fuse_convtranspose_bn),
(nn.BatchNorm2d, nn.ConvTranspose2d): reverse2(fuse_convtranspose_bn),
(nn.BatchNorm3d, nn.ConvTranspose3d): reverse2(fuse_convtranspose_bn),
}
def get_valid_patterns(op_pattern):
"""
Returns a list of valid patterns generated from the op_pattern,
since MatchAllNode can match all types of nodes,
e.g. pattern (torch.nn.Conv2d, torch.add) should also be able to match keys like
(MatchAllNode, torch.add) and (torch.nn.Conv2d, MatchAllNode)
Example Input:
(torch.add, (torch.nn.ReLU, torch.nn.Conv2d))
Example Output:
[(torch.add, (torch.nn.ReLU, torch.nn.Conv2d)),
(torch.add, (torch.nn.ReLU, MatchAllNode)),
(torch.add, (MatchAllNode, torch.nn.Conv2d)),
(torch.add, (MatchAllNode, MatchAllNode)),
(MatchAllNode, (torch.nn.ReLU, torch.nn.Conv2d)),
(MatchAllNode, (torch.nn.ReLU, MatchAllNode)),
(MatchAllNode, (MatchAllNode, torch.nn.Conv2d)),
(MatchAllNode, (MatchAllNode, MatchAllNode)),
]
"""
result = []
if isinstance(op_pattern, (tuple, list)):
sub_combs = []
for sub_pattern in op_pattern:
sub_combs.append(get_valid_patterns(sub_pattern))
result = list(itertools.product(*sub_combs))
else:
result = [op_pattern, MatchAllNode]
return result
def get_fuser_method_new(
op_pattern: Pattern,
fuser_method_mapping: Optional[Dict[Pattern, Union[nn.Sequential, Callable]]] = None):
""" This will be made defult after we deparate the get_fuser_method
Would like to implement this first and have a separate PR for deprecation
"""
if fuser_method_mapping is None:
fuser_method_mapping = DEFAULT_PATTERN_TO_FUSER_METHOD
op_patterns = get_valid_patterns(op_pattern)
fuser_method = None
for op_pattern in op_patterns:
fuser_method = fuser_method_mapping.get(op_pattern, None)
if fuser_method is not None:
break
assert fuser_method is not None, "did not find fuser method for: {} ".format(op_pattern)
return fuser_method
|
pytorch-master
|
torch/ao/quantization/fuser_method_mappings.py
|
import torch
from torch.ao.quantization.qconfig import QConfig
from torch.ao.quantization.quant_type import QuantType
from torch.jit._recursive import wrap_cpp_module
def _check_is_script_module(model):
if not isinstance(model, torch.jit.ScriptModule):
raise ValueError('input must be a script module, got: ' + str(type(model)))
def _check_forward_method(model):
if not model._c._has_method('forward'):
raise ValueError('input script module does not have forward method')
def script_qconfig(qconfig):
r"""Instantiate the activation and weight observer modules and script
    them; these observer module instances will be deep-copied during the
    prepare_jit step.
"""
return QConfig(
activation=torch.jit.script(qconfig.activation())._c,
weight=torch.jit.script(qconfig.weight())._c)
def script_qconfig_dict(qconfig_dict):
r"""Helper function used by `prepare_jit`.
    Apply `script_qconfig` to all entries in `qconfig_dict` that are
    not None.
"""
return {k: script_qconfig(v) if v else None for k, v in qconfig_dict.items()}
def fuse_conv_bn_jit(model, inplace=False):
r""" Fuse conv - bn module
Works for eval model only.
Args:
model: TorchScript model from scripting or tracing
"""
torch._C._log_api_usage_once("quantization_api.quantize_jit.fuse_conv_bn_jit")
model_c = model._c
model_c = torch._C._jit_pass_fold_convbn(model_c)
if inplace:
model._reconstruct(model_c)
else:
model = wrap_cpp_module(model_c)
return model
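# Illustrative sketch (not part of the original module): fusing Conv-BN pairs in
# a scripted eval-mode model before quantization. `float_model` is a placeholder
# for any torch.nn.Module containing Conv/BatchNorm pairs.
def _example_fuse_conv_bn_jit(float_model):
    ts_model = torch.jit.script(float_model.eval())
    return fuse_conv_bn_jit(ts_model)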
def _prepare_jit(model, qconfig_dict, inplace=False, quant_type=QuantType.STATIC):
_check_is_script_module(model)
_check_forward_method(model)
if not all(isinstance(x, str) for x in qconfig_dict.keys()):
raise ValueError('qconfig_dict should only contain names(str) as keys.')
scripted_qconfig_dict = script_qconfig_dict(qconfig_dict)
model = fuse_conv_bn_jit(model, inplace)
model_c = torch._C._jit_pass_insert_observers(model._c,
'forward',
scripted_qconfig_dict,
inplace,
quant_type)
if inplace:
model._reconstruct(model_c)
else:
model = wrap_cpp_module(model_c)
return model
def prepare_jit(model, qconfig_dict, inplace=False):
torch._C._log_api_usage_once("quantization_api.quantize_jit.prepare_jit")
return _prepare_jit(model, qconfig_dict, inplace, quant_type=QuantType.STATIC)
def prepare_dynamic_jit(model, qconfig_dict, inplace=False):
torch._C._log_api_usage_once("quantization_api.quantize_jit.prepare_dynamic_jit")
return _prepare_jit(model, qconfig_dict, inplace, quant_type=QuantType.DYNAMIC)
def _convert_jit(model, inplace=False, debug=False, quant_type=QuantType.STATIC,
preserved_attrs=None):
_check_is_script_module(model)
model.eval()
model_c = model._c
model_c = torch._C._jit_pass_insert_quant_dequant(model_c, 'forward', inplace, debug, quant_type)
if not debug:
is_xpu = all(p.device.type == 'xpu' for p in model.parameters())
if not is_xpu:
# Moving model parameters to CPU since quantized operators
# are only supported on CPU and XPU right now
model.cpu()
if preserved_attrs is None:
preserved_attrs = []
model_c = torch._C._jit_pass_quant_finalize(model_c, quant_type, preserved_attrs)
if inplace:
model._reconstruct(model_c)
else:
model = wrap_cpp_module(model_c)
torch._C._jit_pass_constant_propagation(model.graph)
torch._C._jit_pass_dce(model.graph)
return model
def convert_jit(model, inplace=False, debug=False, preserved_attrs=None):
torch._C._log_api_usage_once("quantization_api.quantize_jit.convert_jit")
return _convert_jit(model, inplace, debug, quant_type=QuantType.STATIC, preserved_attrs=preserved_attrs)
def convert_dynamic_jit(model, inplace=False, debug=False, preserved_attrs=None):
torch._C._log_api_usage_once("quantization_api.quantize_jit.convert_dynamic_jit")
return _convert_jit(model, inplace, debug, quant_type=QuantType.DYNAMIC, preserved_attrs=preserved_attrs)
def _quantize_jit(model, qconfig_dict, run_fn=None, run_args=None, inplace=False, debug=False, quant_type=QuantType.STATIC):
# Always do inplace convert because the Tensor is already
# copied in prepare_jit when inplace is False
if quant_type == QuantType.DYNAMIC:
model = prepare_dynamic_jit(model, qconfig_dict, inplace)
model = convert_dynamic_jit(model, True, debug)
else:
assert run_fn, "Must provide calibration function for post training static quantization"
assert run_args, "Must provide calibration dataset for post training static quantization"
model = prepare_jit(model, qconfig_dict, inplace)
run_fn(model, *run_args)
model = convert_jit(model, True, debug)
torch._C._jit_pass_constant_propagation(model.graph)
torch._C._jit_pass_dce(model.graph)
return model
def quantize_jit(model, qconfig_dict, run_fn, run_args, inplace=False, debug=False):
r"""Quantize the input float TorchScript model with
post training static quantization.
First it will prepare the model for calibration, then it calls
`run_fn` which will run the calibration step, after that we will
convert the model to a quantized model.
Args:
`model`: input float TorchScript model
`qconfig_dict`: qconfig_dict is a dictionary with names of sub modules as key and
qconfig for that module as value, empty key means the qconfig will be applied
to whole model unless it's overwritten by more specific configurations, the
qconfig for each module is either found in the dictionary or fallback to
the qconfig of parent module.
Right now qconfig_dict is the only way to configure how the model is quantized,
and it is done in the granularity of module, that is, we only support one type
of qconfig for each torch.nn.Module, and the qconfig for sub module will
override the qconfig for parent module, empty string means global configuration.
`run_fn`: a calibration function for calibrating the prepared model
`run_args`: positional arguments for `run_fn`
`inplace`: carry out model transformations in-place, the original module is
mutated
`debug`: flag for producing a debug friendly model (preserve weight attribute)
Return:
        Quantized TorchScript model.
Example:
```python
import torch
from torch.ao.quantization import get_default_qconfig
from torch.ao.quantization import quantize_jit
ts_model = torch.jit.script(float_model.eval()) # or torch.jit.trace(float_model, input)
qconfig = get_default_qconfig('fbgemm')
def calibrate(model, data_loader):
model.eval()
with torch.no_grad():
for image, target in data_loader:
model(image)
quantized_model = quantize_jit(
ts_model,
{'': qconfig},
calibrate,
[data_loader_test])
```
"""
torch._C._log_api_usage_once("quantization_api.quantize_jit.quantize_jit")
return _quantize_jit(model, qconfig_dict, run_fn, run_args, inplace, debug, quant_type=QuantType.STATIC)
def quantize_dynamic_jit(model, qconfig_dict, inplace=False, debug=False):
r"""Quantize the input float TorchScript model with
post training dynamic quantization.
Currently only qint8 quantization of torch.nn.Linear is supported.
Args:
`model`: input float TorchScript model
`qconfig_dict`: qconfig_dict is a dictionary with names of sub modules as key and
qconfig for that module as value, please see detailed
descriptions in :func:`~torch.ao.quantization.quantize_jit`
`inplace`: carry out model transformations in-place, the original module is
mutated
`debug`: flag for producing a debug friendly model (preserve weight attribute)
Return:
        Quantized TorchScript model.
Example:
```python
import torch
    from torch.ao.quantization import per_channel_dynamic_qconfig
    from torch.ao.quantization import quantize_dynamic_jit
    ts_model = torch.jit.script(float_model.eval()) # or torch.jit.trace(float_model, input)
    qconfig = per_channel_dynamic_qconfig
    quantized_model = quantize_dynamic_jit(
        ts_model,
        {'': qconfig})
```
"""
torch._C._log_api_usage_once("quantization_api.quantize_jit.quantize_dynamic_jit")
return _quantize_jit(model, qconfig_dict, inplace=inplace, debug=debug, quant_type=QuantType.DYNAMIC)
|
pytorch-master
|
torch/ao/quantization/quantize_jit.py
|
import torch
from ._dbr.auto_trace import add_auto_observation, add_auto_convert
from ._dbr.fusion import get_module_fusion_fqns
from ._dbr.qconfig_mapping_utils import normalize_object_types
from .qconfig_mapping_utils import (
get_flattened_qconfig_dict,
)
from torch.ao.quantization.qconfig_mapping import QConfigMapping
from torch.ao.quantization.quantization_mappings import (
get_default_static_quant_module_mappings,
get_default_dynamic_quant_module_mappings,
)
from ._dbr.module_swap_utils import _swap_child_modules
def prepare(model, qconfig_dict, example_inputs, inplace=False, allow_list=None,
observer_non_leaf_module_list=None,
prepare_custom_config_dict=None,
fuse_modules=True):
r"""A wrapper around `torch.quantization.prepare` which prepares the
model for quantization using dynamic tracing.
Requires `qconfig_dict` (same format as prepare_fx) to specify the
quantization settings. Not all functionality is supported yet.
Requires `example_inputs` to build
the graph before calibration or quantization aware training can proceed.
Supported `prepare_custom_config_dict` keys:
* `non_traceable_module_class` - same meaning as in prepare_fx
* `output_dtypes` - expected dtypes of model outputs, must match actual
output structure.
TODO(future PR): better docblock
"""
assert example_inputs is not None, 'example_inputs must be specified'
if prepare_custom_config_dict is None:
prepare_custom_config_dict = {}
# TODO: change signature to use QConfigMapping instead of qconfig_dict
qconfig_mapping = QConfigMapping.from_dict(qconfig_dict)
assert len(qconfig_mapping.module_name_regex_qconfigs) == 0, \
'qconfig_mapping.set_module_name_regex is not supported yet in define-by-run quantization'
assert len(qconfig_mapping.module_name_object_type_order_qconfigs) == 0, \
'qconfig_mapping.set_module_name_object_type_order is not supported yet in define-by-run quantization'
normalize_object_types(qconfig_mapping)
flattened_qconfig_dict = get_flattened_qconfig_dict(qconfig_mapping)
torch.quantization.propagate_qconfig_(model, flattened_qconfig_dict)
# if parts of the model are non traceable, delete qconfig from
# them so they do not get swapped
non_traceable_module_class = \
prepare_custom_config_dict.get('non_traceable_module_class', [])
for name, child in model.named_modules():
for target_cls in non_traceable_module_class:
if isinstance(child, target_cls):
for _, child_child in child.named_modules():
child_child.qconfig = None
# TODO(future PR): QAT support
if fuse_modules:
# automatically fuse modules
old_class = model.__class__
model = add_auto_observation(
model, qconfig_mapping, example_inputs,
prepare_custom_config_dict=prepare_custom_config_dict)
module_fusion_fqns = get_module_fusion_fqns(model)
if len(module_fusion_fqns):
model = torch.quantization.fuse_modules(model, module_fusion_fqns)
# Since we are reusing the auto_trace machinery to find fusion
# FQNs, we need to do some surgery to get qconfigs on modules
# after module fusion to be correct.
for _, child in model.named_modules():
if isinstance(child, torch.nn.intrinsic._FusedModule):
if hasattr(child[0], 'qconfig'):
child.qconfig = child[0].qconfig
# delete all the DBR state from the model, so add_auto_observation
# can start from a clean slate
parents_to_delete_auto_quant_state = []
for k, v in model.named_modules():
if hasattr(v, '_auto_quant_state'):
parents_to_delete_auto_quant_state.append(v)
for v in parents_to_delete_auto_quant_state:
del v._auto_quant_state
del model._fqn_to_auto_quant_state_map
for p in model.parameters():
if hasattr(p, '_qtensor_info'):
del p._qtensor_info
for b in model.buffers():
if hasattr(b, '_qtensor_info'):
del b._qtensor_info
# the model hierarchy might have changed during fusion, so we
# have to delete the cached module hook types
for k, v in model.named_modules():
if hasattr(v, '_auto_quant_module_hook_type'):
del v._auto_quant_module_hook_type
model.__class__ = old_class
# Automatically assign qconfigs for modules where the defaults do not
# work.
# TODO(future PR): clean this up and align with other APIs
for name, child in model.named_modules():
if isinstance(child, (torch.nn.Embedding, torch.nn.EmbeddingBag)):
            # TODO(future PR): quantize embeddings with
            # torch.quantization.float_qparams_weight_only_qconfig; for now,
            # skip quantizing them, since enabling that qconfig currently
            # breaks attention_is_all_you_need (issue to be written up)
            child.qconfig = None  # type: ignore[assignment]
elif isinstance(child, torch.nn.LSTM):
# TODO: fix LSTM handling in eager mode static quant and remove this
qconfig_mapping.object_type_qconfigs[torch.nn.LSTM] = None
# TODO(future PR): do the QAT module swap
assert not inplace
model = add_auto_observation(
model, qconfig_mapping, example_inputs,
prepare_custom_config_dict=prepare_custom_config_dict)
return model
def convert(model: torch.nn.Module) -> torch.nn.Module:
r"""Converts a prepared DBR quantization model to a quantized form.
TODO(future PR): better docblock
"""
static_mappings = get_default_static_quant_module_mappings()
dynamic_mappings = get_default_dynamic_quant_module_mappings()
# swap the modules
_swap_child_modules(model, static_mappings, dynamic_mappings)
# add dynamic handling for quants/dequants, functions and methods
model = add_auto_convert(model)
return model
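# A minimal end-to-end sketch of the DBR flow above (prepare -> calibrate ->
# convert), assuming an eval-mode float model with a single Linear layer and
# the default static qconfig. The `_example_*` helper name, the model and the
# input shape are illustrative only, not part of this module's API.
def _example_dbr_prepare_convert_flow():
    import torch
    float_model = torch.nn.Sequential(torch.nn.Linear(4, 4)).eval()
    example_inputs = (torch.randn(1, 4),)
    qconfig_dict = {'': torch.quantization.default_qconfig}
    prepared = prepare(float_model, qconfig_dict, example_inputs)
    # calibration: running data through the prepared model populates observers
    prepared(*example_inputs)
    quantized = convert(prepared)
    return quantized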
|
pytorch-master
|
torch/ao/quantization/_quantize_dbr.py
|
import enum
__all__ = [
"QuantType",
"quant_type_to_str",
]
# Quantization type (dynamic quantization, static quantization).
# Should match the c++ enum in quantization_type.h
class QuantType(enum.IntEnum):
DYNAMIC = 0
STATIC = 1
QAT = 2
WEIGHT_ONLY = 3
_quant_type_to_str = {
QuantType.STATIC: "static",
QuantType.DYNAMIC: "dynamic",
QuantType.QAT: "qat",
QuantType.WEIGHT_ONLY: "weight_only",
}
# TODO: make this private
def quant_type_to_str(quant_type: QuantType) -> str:
return _quant_type_to_str[quant_type]
def _quant_type_from_str(name: str) -> QuantType:
for quant_type, s in _quant_type_to_str.items():
if name == s:
return quant_type
raise ValueError("Unknown QuantType name '%s'" % name)
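# A minimal usage sketch for the helpers above; the `_example_*` helper name is
# illustrative only, not part of this module's API.
def _example_quant_type_round_trip():
    # a QuantType converts to its string name and back
    name = quant_type_to_str(QuantType.STATIC)
    assert name == "static"
    assert _quant_type_from_str(name) is QuantType.STATIC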
|
pytorch-master
|
torch/ao/quantization/quant_type.py
|
from typing import Any, Dict, Optional, Set, Tuple, Union
import warnings
import torch
from torch.fx import GraphModule
from .fx.tracer import QuantizationTracer
from .fx import fuse # noqa: F401
from .fx import prepare # noqa: F401
from .fx.convert import convert
from .backend_config import ( # noqa: F401
BackendConfig,
get_tensorrt_backend_config,
)
from .fx.graph_module import ObservedGraphModule
from .fx.custom_config import (
ConvertCustomConfig,
FuseCustomConfig,
PrepareCustomConfig,
)
from .fx.utils import graph_pretty_str # noqa: F401
from .fx.utils import get_custom_module_class_keys # noqa: F401
from .fx.utils import get_skipped_module_name_and_classes
from .qconfig_mapping import QConfigMapping
def _check_is_graph_module(model: torch.nn.Module) -> None:
if not isinstance(model, GraphModule):
raise ValueError(
"input model must be a GraphModule, "
+ "Got type:"
+ str(type(model))
+ " Please make "
+ "sure to follow the tutorials."
)
def _swap_ff_with_fxff(model: torch.nn.Module) -> None:
r""" Swap FloatFunctional with FXFloatFunctional
"""
modules_to_swap = []
for name, module in model.named_children():
if isinstance(module, torch.nn.quantized.FloatFunctional):
modules_to_swap.append(name)
else:
_swap_ff_with_fxff(module)
for name in modules_to_swap:
del model._modules[name]
model._modules[name] = torch.nn.quantized.FXFloatFunctional()
def _fuse_fx(
graph_module: GraphModule,
is_qat: bool,
fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
r""" Internal helper function to fuse modules in preparation for quantization
Args:
graph_module: GraphModule object from symbolic tracing (torch.fx.symbolic_trace)
"""
_check_is_graph_module(graph_module)
return fuse(
graph_module, is_qat, fuse_custom_config, backend_config) # type: ignore[operator]
class Scope(object):
""" Scope object that records the module path and the module type
of a module. Scope is used to track the information of the module
that contains a Node in a Graph of GraphModule. For example::
class Sub(torch.nn.Module):
def forward(self, x):
# This will be a call_method Node in GraphModule,
# scope for this would be (module_path="sub", module_type=Sub)
return x.transpose(1, 2)
class M(torch.nn.Module):
def __init__(self):
self.sub = Sub()
def forward(self, x):
# This will be a call_method Node as well,
# scope for this would be (module_path="", None)
x = x.transpose(1, 2)
x = self.sub(x)
return x
"""
def __init__(self, module_path: str, module_type: Any):
super().__init__()
self.module_path = module_path
self.module_type = module_type
class ScopeContextManager(object):
""" A context manager to track the Scope of Node during symbolic tracing.
When entering a forward function of a Module, we'll update the scope information of
the current module, and when we exit, we'll restore the previous scope information.
"""
def __init__(
self, scope: Scope, current_module: torch.nn.Module, current_module_path: str
):
super().__init__()
self.prev_module_type = scope.module_type
self.prev_module_path = scope.module_path
self.scope = scope
self.scope.module_path = current_module_path
self.scope.module_type = type(current_module)
def __enter__(self):
return
def __exit__(self, *args):
self.scope.module_path = self.prev_module_path
self.scope.module_type = self.prev_module_type
return
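# A minimal usage sketch for Scope/ScopeContextManager above: entering the
# context manager points the shared Scope at the current module, and exiting
# restores the previous scope. The `_example_*` helper name and the module
# path "sub.linear" are illustrative only.
def _example_scope_context_manager():
    scope = Scope(module_path="", module_type=None)
    submodule = torch.nn.Linear(2, 2)
    with ScopeContextManager(scope, submodule, "sub.linear"):
        assert scope.module_path == "sub.linear"
        assert scope.module_type is torch.nn.Linear
    assert scope.module_path == ""
    assert scope.module_type is None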
def _prepare_fx(
model: torch.nn.Module,
qconfig_mapping: Union[QConfigMapping, Dict[str, Any]],
is_qat: bool,
example_inputs: Tuple[Any, ...],
prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None,
_equalization_config: Optional[Union[QConfigMapping, Dict[str, Any]]] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
is_standalone_module: bool = False,
) -> ObservedGraphModule:
r""" Internal helper function for prepare_fx
Args:
`model`, `qconfig_mapping`, `prepare_custom_config`, `_equalization_config`:
see docs for :func:`~torch.ao.quantization.prepare_fx`
`is_standalone_module`: a boolean flag indicates whether we are
quantizing a standalone module or not, a standalone module
is a submodule of the parent module that is not inlined in the
forward graph of the parent module,
the way we quantize standalone module is described in:
:func:`~torch.ao.quantization._prepare_standalone_module_fx`
"""
if prepare_custom_config is None:
prepare_custom_config = PrepareCustomConfig()
if _equalization_config is None:
_equalization_config = QConfigMapping()
if isinstance(prepare_custom_config, Dict):
warnings.warn(
"Passing a prepare_custom_config_dict to prepare is deprecated and will not be supported "
"in a future version. Please pass in a PrepareCustomConfig instead.")
prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config)
# swap FloatFunctional with FXFloatFunctional
_swap_ff_with_fxff(model)
skipped_module_names, skipped_module_classes = \
get_skipped_module_name_and_classes(prepare_custom_config, is_standalone_module)
preserved_attributes = prepare_custom_config.preserved_attributes
# symbolically trace the model
tracer = QuantizationTracer(skipped_module_names, skipped_module_classes) # type: ignore[arg-type]
graph_module = GraphModule(model, tracer.trace(model))
for attr_name in preserved_attributes:
setattr(graph_module, attr_name, getattr(model, attr_name))
fuse_custom_config = FuseCustomConfig().set_preserved_attributes(prepare_custom_config.preserved_attributes)
graph_module = _fuse_fx(
graph_module,
is_qat,
fuse_custom_config,
backend_config)
prepared = prepare(
graph_module,
qconfig_mapping,
is_qat,
tracer.node_name_to_scope,
example_inputs=example_inputs,
prepare_custom_config=prepare_custom_config,
_equalization_config=_equalization_config,
backend_config=backend_config,
is_standalone_module=is_standalone_module,
) # type: ignore[operator]
for attr_name in preserved_attributes:
setattr(prepared, attr_name, getattr(model, attr_name))
return prepared
def _prepare_standalone_module_fx(
model: torch.nn.Module,
qconfig_mapping: Union[QConfigMapping, Dict[str, Any]],
is_qat: bool,
example_inputs: Tuple[Any, ...],
prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
r""" [Internal use only] Prepare a standalone module, so that it can be used when quantizing the
parent module.
    standalone_module means it is a submodule that is not inlined in the parent module,
    and will be quantized separately as one unit.
How the standalone module is observed is specified by `input_quantized_idxs` and
`output_quantized_idxs` in the prepare_custom_config for the standalone module
Returns:
* model(GraphModule): prepared standalone module. It has these attributes:
* `_standalone_module_input_quantized_idxs(List[Int])`: a list of
indexes for the graph input that is expected to be quantized,
same as input_quantized_idxs configuration provided
for the standalone module
            * `_standalone_module_output_quantized_idxs(List[Int])`: a list of
              indexes for the graph outputs that are quantized,
              same as the output_quantized_idxs configuration provided
              for the standalone module
"""
return _prepare_fx(
model,
qconfig_mapping,
is_qat,
example_inputs,
prepare_custom_config,
backend_config=backend_config,
is_standalone_module=True,
)
def fuse_fx(
model: torch.nn.Module,
fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
r""" Fuse modules like conv+bn, conv+bn+relu etc, model must be in eval mode.
Fusion rules are defined in torch.quantization.fx.fusion_pattern.py
Args:
* `model`: a torch.nn.Module model
* `fuse_custom_config`: custom configurations for fuse_fx.
See :class:`~torch.ao.quantization.fx.custom_config.FuseCustomConfig` for more detail::
from torch.ao.quantization.fx.custom_config import FuseCustomConfig
fuse_custom_config = FuseCustomConfig().set_preserved_attributes(["preserved_attr"])
Example::
from torch.ao.quantization import fuse_fx
m = Model().eval()
m = fuse_fx(m)
"""
if fuse_custom_config is None:
fuse_custom_config = FuseCustomConfig()
if isinstance(fuse_custom_config, Dict):
warnings.warn(
"Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported "
"in a future version. Please pass in a FuseCustomConfig instead.")
fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)
torch._C._log_api_usage_once("quantization_api.quantize_fx.fuse_fx")
graph_module = torch.fx.symbolic_trace(model)
preserved_attributes: Set[str] = set()
if fuse_custom_config:
preserved_attributes = set(fuse_custom_config.preserved_attributes)
for attr_name in preserved_attributes:
setattr(graph_module, attr_name, getattr(model, attr_name))
return _fuse_fx(graph_module, False, fuse_custom_config, backend_config)
def prepare_fx(
model: torch.nn.Module,
qconfig_mapping: Union[QConfigMapping, Dict[str, Any]],
example_inputs: Tuple[Any, ...],
prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None,
_equalization_config: Optional[Union[QConfigMapping, Dict[str, Any]]] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> ObservedGraphModule:
r""" Prepare a model for post training static quantization
Args:
* `model` (required): torch.nn.Module model, must be in eval mode
* `qconfig_mapping` (required): mapping from model ops to qconfigs::
from torch.quantization import QConfigMapping
qconfig_mapping = QConfigMapping() \
.set_global(global_qconfig) \
.set_object_type(torch.nn.Linear, qconfig1) \
.set_object_type(torch.nn.functional.linear, qconfig1) \
.set_module_name_regex("foo.*bar.*conv[0-9]+", qconfig1) \
.set_module_name_regex("foo.*bar.*", qconfig2) \
.set_module_name_regex("foo.*", qconfig3) \
.set_module_name("module1", qconfig1) \
.set_module_name("module2", qconfig2) \
.set_module_name_object_type_order("module3", torch.nn.functional.linear, 0, qconfig3)
The precedence of different settings:
set_global < set_object_type < set_module_name_regex < set_module_name < set_module_name_object_type_order
* `example_inputs`: (required) Example inputs for forward function of the model
* `prepare_custom_config`: customization configuration for quantization tool.
See :class:`~torch.ao.quantization.fx.custom_config.PrepareCustomConfig` for more detail::
from torch.ao.quantization.fx.custom_config import PrepareCustomConfig
prepare_custom_config = PrepareCustomConfig() \
.set_standalone_module_name("module1", qconfig_mapping, example_inputs, \
child_prepare_custom_config, backend_config) \
.set_standalone_module_class(MyStandaloneModule, qconfig_mapping, example_inputs, \
child_prepare_custom_config, backend_config) \
.set_float_to_observed_mapping(FloatCustomModule, ObservedCustomModule) \
.set_non_traceable_module_names(["module2", "module3"]) \
.set_non_traceable_module_classes([NonTraceableModule1, NonTraceableModule2]) \
.set_input_quantized_indexes([0]) \
.set_output_quantized_indexes([0]) \
.set_preserved_attributes(["attr1", "attr2"])
* `_equalization_config`: config for specifying how to perform equalization on the model
* `backend_config`: config that specifies how operators are quantized
           in a backend, this includes how the operators are observed,
supported fusion patterns, how quantize/dequantize ops are
inserted, supported dtypes etc. The structure of the dictionary is still WIP
and will change in the future, please don't use right now.
Return:
A GraphModule with observer (configured by qconfig_mapping), ready for calibration
Example::
import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization import prepare_fx
class Submodule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
x = self.linear(x)
return x
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 5)
self.sub = Submodule()
def forward(self, x):
x = self.linear(x)
x = self.sub(x) + x
return x
# initialize a floating point model
float_model = M().eval()
# define calibration function
def calibrate(model, data_loader):
model.eval()
with torch.no_grad():
for image, target in data_loader:
model(image)
# qconfig is the configuration for how we insert observers for a particular
# operator
# qconfig = get_default_qconfig("fbgemm")
# Example of customizing qconfig:
# qconfig = torch.ao.quantization.QConfig(
# activation=MinMaxObserver.with_args(dtype=torch.qint8),
# weight=MinMaxObserver.with_args(dtype=torch.qint8))
# `activation` and `weight` are constructors of observer module
# qconfig_mapping is a collection of quantization configurations, user can
# set the qconfig for each operator (torch op calls, functional calls, module calls)
# in the model through qconfig_mapping
# the following call will get the qconfig_mapping that works best for models
# that target "fbgemm" backend
qconfig_mapping = get_default_qconfig_mapping("fbgemm")
# We can customize qconfig_mapping in different ways.
# e.g. set the global qconfig, which means we will use the same qconfig for
# all operators in the model, this can be overwritten by other settings
# qconfig_mapping = QConfigMapping().set_global(qconfig)
# e.g. quantize the linear submodule with a specific qconfig
# qconfig_mapping = QConfigMapping().set_module_name("linear", qconfig)
# e.g. quantize all nn.Linear modules with a specific qconfig
# qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, qconfig)
# for a more complete list, please see the docstring for :class:`torch.ao.quantization.QConfigMapping`
# argument
# example_inputs is a tuple of inputs, that is used to infer the type of the
# outputs in the model
# currently it's not used, but please make sure model(*example_inputs) runs
example_inputs = (torch.randn(1, 3, 224, 224),)
# TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
# e.g. backend_config = get_default_backend_config("fbgemm")
# `prepare_fx` inserts observers in the model based on qconfig_mapping and
# backend_config. If the configuration for an operator in qconfig_mapping
# is supported in the backend_config (meaning it's supported by the target
# hardware), we'll insert observer modules according to the qconfig_mapping
# otherwise the configuration in qconfig_mapping will be ignored
#
# Example:
# in qconfig_mapping, user sets linear module to be quantized with quint8 for
# activation and qint8 for weight:
# qconfig = torch.ao.quantization.QConfig(
        #    activation=MinMaxObserver.with_args(dtype=torch.quint8),
        #    weight=MinMaxObserver.with_args(dtype=torch.qint8))
# Note: current qconfig api does not support setting output observer, but
# we may extend this to support these more fine grained control in the
# future
#
# qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, qconfig)
# in backend config, linear module also supports in this configuration:
# weighted_int8_dtype_config = DTypeConfig(
# input_dtype=torch.quint8,
# output_dtype=torch.quint8,
# weight_dtype=torch.qint8,
        #     bias_dtype=torch.float)
# linear_pattern_config = BackendPatternConfig(torch.nn.Linear) \
# .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
# .add_dtype_config(weighted_int8_dtype_config) \
# ...
# backend_config = BackendConfig().set_backend_pattern_config(linear_pattern_config)
        # `prepare_fx` will check that the setting requested by the user in qconfig_mapping
# is supported by the backend_config and insert observers and fake quant modules
# in the model
prepared_model = prepare_fx(float_model, qconfig_mapping, example_inputs)
# Run calibration
calibrate(prepared_model, sample_inference_data)
"""
torch._C._log_api_usage_once("quantization_api.quantize_fx.prepare_fx")
return _prepare_fx(
model,
qconfig_mapping,
False, # is_qat
example_inputs,
prepare_custom_config,
_equalization_config,
backend_config,
)
def prepare_qat_fx(
model: torch.nn.Module,
qconfig_mapping: Union[QConfigMapping, Dict[str, Any]],
example_inputs: Tuple[Any, ...],
prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> ObservedGraphModule:
r""" Prepare a model for quantization aware training
Args:
* `model`: torch.nn.Module model, must be in train mode
* `qconfig_mapping`: see :func:`~torch.ao.quantization.prepare_fx`
* `example_inputs`: see :func:`~torch.ao.quantization.prepare_fx`
* `prepare_custom_config`: see :func:`~torch.ao.quantization.prepare_fx`
* `backend_config`: see :func:`~torch.ao.quantization.prepare_fx`
Return:
A GraphModule with fake quant modules (configured by qconfig_mapping), ready for
quantization aware training
Example::
import torch
from torch.ao.quantization import get_default_qat_qconfig_mapping
        from torch.ao.quantization import prepare_qat_fx
class Submodule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
x = self.linear(x)
return x
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 5)
self.sub = Submodule()
def forward(self, x):
x = self.linear(x)
x = self.sub(x) + x
return x
# initialize a floating point model
float_model = M().train()
# (optional, but preferred) load the weights from pretrained model
# float_model.load_weights(...)
# define the training loop for quantization aware training
def train_loop(model, train_data):
model.train()
            for image, target in train_data:
...
# qconfig is the configuration for how we insert observers for a particular
# operator
# qconfig = get_default_qconfig("fbgemm")
# Example of customizing qconfig:
# qconfig = torch.ao.quantization.QConfig(
# activation=FakeQuantize.with_args(observer=MinMaxObserver.with_args(dtype=torch.qint8)),
# weight=FakeQuantize.with_args(observer=MinMaxObserver.with_args(dtype=torch.qint8)))
# `activation` and `weight` are constructors of observer module
# qconfig_mapping is a collection of quantization configurations, user can
# set the qconfig for each operator (torch op calls, functional calls, module calls)
# in the model through qconfig_mapping
# the following call will get the qconfig_mapping that works best for models
# that target "fbgemm" backend
        qconfig_mapping = get_default_qat_qconfig_mapping("fbgemm")
# We can customize qconfig_mapping in different ways, please take a look at
        # the docstring for :func:`~torch.ao.quantization.prepare_fx` for different ways
# to configure this
# example_inputs is a tuple of inputs, that is used to infer the type of the
# outputs in the model
# currently it's not used, but please make sure model(*example_inputs) runs
example_inputs = (torch.randn(1, 3, 224, 224),)
# TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
# e.g. backend_config = get_default_backend_config("fbgemm")
# `prepare_qat_fx` inserts observers in the model based on qconfig_mapping and
# backend_config, if the configuration for an operator in qconfig_mapping
# is supported in the backend_config (meaning it's supported by the target
# hardware), we'll insert fake_quantize modules according to the qconfig_mapping
# otherwise the configuration in qconfig_mapping will be ignored
# see :func:`~torch.ao.quantization.prepare_fx` for a detailed explanation of
# how qconfig_mapping interacts with backend_config
prepared_model = prepare_qat_fx(float_model, qconfig_mapping, example_inputs)
# Run training
        train_loop(prepared_model, train_data)
"""
torch._C._log_api_usage_once("quantization_api.quantize_fx.prepare_qat_fx")
return _prepare_fx(
model,
qconfig_mapping,
True, # is_qat
example_inputs,
prepare_custom_config,
backend_config=backend_config,
)
def _convert_fx(
graph_module: GraphModule,
is_reference: bool,
convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
is_standalone_module: bool = False,
_remove_qconfig: bool = True,
qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> torch.nn.Module:
""" `is_standalone_module`: see docs in :func:`~torch.ao.quantization.prepare_standalone_module_fx`
"""
if convert_custom_config is None:
convert_custom_config = ConvertCustomConfig()
if isinstance(convert_custom_config, Dict):
warnings.warn(
"Passing a convert_custom_config_dict to convert is deprecated and will not be supported "
"in a future version. Please pass in a ConvertCustomConfig instead.")
convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)
_check_is_graph_module(graph_module)
quantized = convert(
graph_module,
is_reference,
convert_custom_config,
is_standalone_module,
_remove_qconfig_flag=_remove_qconfig,
qconfig_mapping=qconfig_mapping,
backend_config=backend_config,
)
preserved_attributes = convert_custom_config.preserved_attributes
for attr_name in preserved_attributes:
setattr(quantized, attr_name, getattr(graph_module, attr_name))
return quantized
def convert_fx(
graph_module: GraphModule,
convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
_remove_qconfig: bool = True,
qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> torch.nn.Module:
r""" Convert a calibrated or trained model to a quantized model
Args:
* `graph_module`: A prepared and calibrated/trained model (GraphModule)
        * Note: to produce a reference quantized model, which serves as a common interface
          between pytorch quantization and other backends like accelerators, use
          :func:`~torch.ao.quantization.quantize_fx.convert_to_reference_fx` instead
* `convert_custom_config`: custom configurations for convert function.
See :class:`~torch.ao.quantization.fx.custom_config.ConvertCustomConfig` for more detail::
from torch.ao.quantization.fx.custom_config import ConvertCustomConfig
convert_custom_config = ConvertCustomConfig() \
.set_observed_to_quantized_mapping(ObservedCustomModule, QuantizedCustomModule) \
.set_preserved_attributes(["attr1", "attr2"])
* `_remove_qconfig`: Option to remove the qconfig attributes in the model after convert.
* `qconfig_mapping`: config for specifying how to convert a model for quantization.
The keys must include the ones in the qconfig_mapping passed to `prepare_fx` or `prepare_qat_fx`,
with the same values or `None`. Additional keys can be specified with values set to `None`.
For each entry whose value is set to None, we skip quantizing that entry in the model::
            qconfig_mapping = (QConfigMapping()
                .set_global(qconfig_from_prepare)
                .set_object_type(torch.nn.functional.add, None)  # skip quantizing torch.nn.functional.add
                .set_object_type(torch.nn.functional.linear, qconfig_from_prepare)
                .set_module_name("foo.bar", None))  # skip quantizing module "foo.bar"
* `backend_config`: A configuration for the backend which describes how
operators should be quantized in the backend, this includes quantization
mode support (static/dynamic/weight_only), dtype support (quint8/qint8 etc.),
observer placement for each operators and fused operators. Detailed
documentation can be found in torch/ao/quantization/backend_config/README.md
Return:
A quantized model (GraphModule)
Example::
# prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training
# convert_fx converts a calibrated/trained model to a quantized model for the
# target hardware, this includes converting the model first to a reference
# quantized model, and then lower the reference quantized model to a backend
# Currently, the supported backends are fbgemm (onednn), qnnpack (xnnpack) and
# they share the same set of quantized operators, so we are using the same
# lowering procedure
#
# backend_config defines the corresponding reference quantized module for
# the weighted modules in the model, e.g. nn.Linear
# TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
# e.g. backend_config = get_default_backend_config("fbgemm")
quantized_model = convert_fx(prepared_model)
"""
torch._C._log_api_usage_once("quantization_api.quantize_fx.convert_fx")
return _convert_fx(
graph_module,
is_reference=False,
convert_custom_config=convert_custom_config,
_remove_qconfig=_remove_qconfig,
qconfig_mapping=qconfig_mapping,
backend_config=backend_config,
)
def convert_to_reference_fx(
graph_module: GraphModule,
convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
_remove_qconfig: bool = True,
qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> torch.nn.Module:
r""" Convert a calibrated or trained model to a reference quantized model,
see https://github.com/pytorch/rfcs/blob/master/RFC-0019-Extending-PyTorch-Quantization-to-Custom-Backends.md for more details,
reference quantzied model is a standard representation of a quantized model provided
by FX Graph Mode Quantization, it can be further lowered to run on the target
hardware, like accelerators
Args:
* `graph_module`: A prepared and calibrated/trained model (GraphModule)
* `convert_custom_config`: custom configurations for convert function.
See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more detail.
* `_remove_qconfig`: Option to remove the qconfig attributes in the model after convert.
* `qconfig_mapping`: config for specifying how to convert a model for quantization.
See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more detail.
* `backend_config`: A configuration for the backend which describes how
operators should be quantized in the backend. See
:func:`~torch.ao.quantization.quantize_fx.convert_fx` for more detail.
Return:
A reference quantized model (GraphModule)
Example::
# prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training
# TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
# e.g. backend_config = get_default_backend_config("fbgemm")
reference_quantized_model = convert_to_reference_fx(prepared_model)
"""
torch._C._log_api_usage_once("quantization_api.quantize_fx.convert_to_reference_fx")
return _convert_fx(
graph_module,
is_reference=True,
convert_custom_config=convert_custom_config,
_remove_qconfig=_remove_qconfig,
qconfig_mapping=qconfig_mapping,
backend_config=backend_config,
)
def _convert_standalone_module_fx(
graph_module: GraphModule,
is_reference: bool = False,
convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
) -> torch.nn.Module:
r""" [Internal use only] Convert a model produced by :func:`~torch.ao.quantization.prepare_standalone_module_fx`
and convert it to a quantized model
Returns a quantized standalone module, whether input/output is quantized is
specified by prepare_custom_config, with
input_quantized_idxs, output_quantized_idxs, please
see docs for prepare_fx for details
"""
return _convert_fx(
graph_module,
is_reference,
convert_custom_config,
is_standalone_module=True,
)
|
pytorch-master
|
torch/ao/quantization/quantize_fx.py
|
import re
from typing import Dict, Callable, Union
from .utils import (
get_combined_dict,
_parent_name,
)
from .quantization_mappings import (
get_default_qat_module_mappings,
)
from .qconfig import QConfigAny
from .qconfig_mapping import QConfigMapping
# TODO: revisit this list. Many helper methods shouldn't be public
__all__ = [
"get_flattened_qconfig_dict",
"get_object_type_qconfig",
"get_module_name_qconfig",
"get_module_name_regex_qconfig",
"maybe_adjust_qconfig_for_module_type_or_name",
"update_qconfig_for_qat",
]
def get_object_type_qconfig(
qconfig_mapping: QConfigMapping,
object_type: Union[Callable, str],
fallback_qconfig: QConfigAny) -> QConfigAny:
return qconfig_mapping.object_type_qconfigs.get(object_type, fallback_qconfig)
def get_module_name_regex_qconfig(qconfig_mapping, module_name, fallback_qconfig):
for regex_pattern, qconfig in qconfig_mapping.module_name_regex_qconfigs.items():
if re.match(regex_pattern, module_name):
# first match wins
return qconfig
return fallback_qconfig
def get_module_name_qconfig(qconfig_mapping, module_name, fallback_qconfig):
if module_name == '':
# module name qconfig not found
return fallback_qconfig
if module_name in qconfig_mapping.module_name_qconfigs:
return qconfig_mapping.module_name_qconfigs[module_name]
else:
parent, _ = _parent_name(module_name)
return get_module_name_qconfig(qconfig_mapping, parent, fallback_qconfig)
def maybe_adjust_qconfig_for_module_type_or_name(qconfig_mapping, module_type, module_name, global_qconfig):
# get qconfig for module_name,
# fallback to module_name_regex_qconfig, module_type_qconfig,
# global_qconfig if necessary
module_type_qconfig = get_object_type_qconfig(
qconfig_mapping, module_type, global_qconfig)
module_name_regex_qconfig = get_module_name_regex_qconfig(
qconfig_mapping, module_name, module_type_qconfig)
module_name_qconfig = get_module_name_qconfig(
qconfig_mapping, module_name, module_name_regex_qconfig)
return module_name_qconfig
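# A minimal sketch of the precedence implemented above: a module_name setting
# overrides a module_name_regex setting, which overrides an object_type
# setting, which overrides the global qconfig. The `_example_*` helper name,
# the qconfigs and the module name "sub.linear" are illustrative only.
def _example_qconfig_precedence():
    import torch
    global_qconfig = torch.ao.quantization.default_qconfig
    linear_qconfig = torch.ao.quantization.default_dynamic_qconfig
    qconfig_mapping = QConfigMapping() \
        .set_object_type(torch.nn.Linear, global_qconfig) \
        .set_module_name("sub.linear", linear_qconfig)
    resolved = maybe_adjust_qconfig_for_module_type_or_name(
        qconfig_mapping, torch.nn.Linear, "sub.linear", global_qconfig)
    # the module_name entry wins over the object_type entry
    assert resolved is linear_qconfig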
def get_flattened_qconfig_dict(qconfig_mapping: QConfigMapping) -> Dict[Union[Callable, str], QConfigAny]:
""" flatten the global, object_type and module_name qconfig
to the same qconfig_dict so that it can be used by
propagate_qconfig_ function.
"module_name_regex" is ignored for now since it's not supported
in propagate_qconfig_, but it can be fixed later.
For example:
Input: {
"": qconfig,
"object_type": [
(torch.add, qconfig)
],
"module_name": [
("conv", qconfig)
]
}
Output: {
"": qconfig,
torch.add: qconfig,
"conv": qconfig
}
"""
flattened: Dict[Union[Callable, str], QConfigAny] = {"": qconfig_mapping.global_qconfig}
for obj, qconfig in qconfig_mapping.object_type_qconfigs.items():
flattened[obj] = qconfig
for obj, qconfig in qconfig_mapping.module_name_qconfigs.items():
flattened[obj] = qconfig
return flattened
def update_qconfig_for_qat(
qconfig_mapping: QConfigMapping,
additional_qat_module_mapping: Dict[Callable, Callable]):
"""
Update the qconfig_dict to account for module swaps during QAT.
During QAT we perform a module swap on the nn.Module types to the corresponding nn.qat.modules types.
"""
all_qat_mappings = get_combined_dict(
get_default_qat_module_mappings(), additional_qat_module_mapping)
object_type_dict = qconfig_mapping.object_type_qconfigs
new_object_type_dict = object_type_dict.copy()
for k, v in new_object_type_dict.items():
if k in all_qat_mappings:
object_type_dict[all_qat_mappings[k]] = v
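# A minimal sketch for update_qconfig_for_qat above: a qconfig registered for
# nn.Linear is mirrored onto the corresponding QAT module type so the swapped
# module still resolves to a qconfig. The `_example_*` helper name and the
# "fbgemm" qconfig are illustrative only.
def _example_update_qconfig_for_qat():
    import torch
    qat_qconfig = torch.ao.quantization.get_default_qat_qconfig("fbgemm")
    qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, qat_qconfig)
    update_qconfig_for_qat(qconfig_mapping, {})
    assert torch.nn.qat.Linear in qconfig_mapping.object_type_qconfigs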
|
pytorch-master
|
torch/ao/quantization/qconfig_mapping_utils.py
|
"""
This module implements nonuniform observers used to collect statistics about
the values observed during calibration (PTQ) or training (QAT).
"""
import torch
import itertools
import matplotlib.pyplot as plt
from torch.ao.quantization.observer import ObserverBase
from torch.ao.quantization.experimental.apot_utils import float_to_apot, apot_to_float
# TODO: Consider adding NonUniformQuantizationObserverBase class
# when more than one non-uniform method is implemented
class APoTObserver(ObserverBase):
b: int
k: int
n: int
min_val: torch.Tensor
max_val: torch.Tensor
def __init__(
self,
b,
k,
dtype=torch.quint8) -> None:
super().__init__(dtype)
self.b = b
self.k = k
self.min_val = torch.tensor([])
self.max_val = torch.tensor([])
# min_val and max_val are optional args to override
# the min_val and max_val observed by forward
def calculate_qparams(self, signed):
return self._calculate_qparams(signed, self.min_val, self.max_val)
r""" Calculates nonuniform quantization parameters according to APoT paper:
https://arxiv.org/pdf/1909.13144.pdf.
Arg:
signed: specifies whether to include signed values in quantization level calculations
min_val: optional arg that can override min_val internal attribute
max_val: optional arg that can override max_val internal attribute
Returns:
alpha: alpha quantization parameter, max of abs value of observed values
gamma: gamma quantization parameter, defined to ensure that alpha is the maximum of the range
quantization_levels: non-uniform quantization levels (fp representation)
level_indices: int representation of quantization_levels indices
"""
def _calculate_qparams(self, signed: bool, min_val=None, max_val=None):
if min_val is not None:
self.min_val = min_val
if max_val is not None:
self.max_val = max_val
# compute alpha
alpha = torch.max(-self.min_val, self.max_val)
# check for valid inputs of b, k
assert(self.k and self.k != 0)
assert(self.b % self.k == 0)
# compute n and store as member variable
self.n = self.b // self.k
# store a tensor of subtensors (all levels)
p_all = []
# create levels
for i in range(0, self.n):
p_curr = torch.tensor([0])
for j in range(0, (2 ** self.k - 2) + 1):
curr_ele = 2 ** (- (i + j * self.n))
p_append = torch.tensor([curr_ele])
p_curr = torch.cat((p_curr, p_append))
# introduce signed numbers
if signed:
p_curr = torch.cat((p_curr, torch.tensor([-curr_ele])))
if signed:
# sort tensor in reverse order before adding to list if signed
sorted, indices = torch.sort(p_curr, descending=True)
p_all.append(sorted)
else:
p_all.append(p_curr)
# gamma calculation:
# loop through all tensors
# if signed, add element at index 0 for each tensor
# else, add element at index 1 for each tensor
# gamma defined to ensure alpha is at max of range
p_sum = 0.0
for tens in p_all:
if signed:
p_sum += float(tens[0])
else:
p_sum += float(tens[1])
# assign gamma
gamma = alpha / p_sum
# calculate cartesian product
cartesian_product = list(itertools.product(*p_all))
quantization_levels_list = []
# calculate sum of each row
for row in cartesian_product:
sum = 0.0
for ele in row:
sum += ele
quantization_levels_list.append(sum)
quantization_levels_gamma = [float(gamma) * ele for ele in quantization_levels_list]
quantization_levels = torch.tensor(quantization_levels_gamma)
level_indices = torch.tensor([])
quantization_levels, level_indices = quantization_levels.sort()
return (alpha, gamma, quantization_levels, level_indices)
r"""Records the running minimum and maximum of ``x``.
Args:
x_orig: Tensor to be observed for min and max val"""
def forward(self, x_orig):
if x_orig.numel() == 0:
return x_orig
x = x_orig.detach()
min_val, max_val = torch.aminmax(x)
if self.min_val.numel():
min_val = torch.min(min_val, self.min_val)
if self.max_val.numel():
max_val = torch.max(max_val, self.max_val)
self.min_val = min_val
self.max_val = max_val
return x_orig
r"""Displays visualization of APoT quantization levels
Args:
observer: APoTObserver to calculate qparams
signed: bool to indicate if qparams should be signed/unsigned
"""
def quant_levels_visualization(self, signed=False):
alpha, gamma, quantization_levels, level_indices = self.calculate_qparams(signed)
xs = [float(x) / 1000.0 for x in range(1000)]
ys = [apot_to_float(float_to_apot(x, quantization_levels, level_indices, alpha),
quantization_levels, level_indices).item() for x in xs]
f = plt.figure(figsize=(15, 10))
plt.plot(xs, ys)
plt.title("APoT Quantization Plot")
plt.xlabel("Full Precision")
plt.ylabel("Quantized")
plt.show()
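# A minimal usage sketch for APoTObserver above: observe a random tensor and
# compute the APoT qparams for b=4 bits split into k=2 bit terms. The
# `_example_*` helper name and the tensor shape are illustrative only.
def _example_apot_observer_qparams():
    obs = APoTObserver(b=4, k=2)
    obs(torch.randn(8, 8))
    alpha, gamma, quantization_levels, level_indices = obs.calculate_qparams(signed=False)
    # alpha is the max absolute observed value; gamma rescales the levels so
    # that the largest level equals alpha; levels are returned sorted
    assert alpha >= 0
    assert quantization_levels.numel() == level_indices.numel()
    return alpha, gamma, quantization_levels, level_indices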
|
pytorch-master
|
torch/ao/quantization/experimental/observer.py
|
r"""
This file contains utility functions to convert values
using APoT nonuniform quantization methods.
"""
import math
r"""Converts floating point input into APoT number
based on quantization levels
"""
def float_to_apot(x, levels, indices, alpha):
# clip values based on alpha
if x < -alpha:
return -alpha
elif x > alpha:
return alpha
levels_lst = list(levels)
indices_lst = list(indices)
min_delta = math.inf
best_idx = 0
for level, idx in zip(levels_lst, indices_lst):
cur_delta = abs(level - x)
if cur_delta < min_delta:
min_delta = cur_delta
best_idx = idx
return best_idx
r"""Converts floating point input into
reduced precision floating point value
based on quantization levels
"""
def quant_dequant_util(x, levels, indices):
levels_lst = list(levels)
indices_lst = list(indices)
min_delta = math.inf
best_fp = 0.0
for level, idx in zip(levels_lst, indices_lst):
cur_delta = abs(level - x)
if cur_delta < min_delta:
min_delta = cur_delta
best_fp = level
return best_fp
r"""Converts APoT input into floating point number
based on quantization levels
"""
def apot_to_float(x_apot, levels, indices):
idx = list(indices).index(x_apot)
return levels[idx]
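# A minimal round-trip sketch for the helpers above, using hand-made levels and
# indices; in practice these come from APoTObserver.calculate_qparams. The
# `_example_*` helper name and the values are illustrative only.
def _example_apot_round_trip():
    levels = [0.0, 0.25, 0.5, 1.0]
    indices = [0, 1, 2, 3]
    idx = float_to_apot(0.3, levels, indices, 1.0)
    assert idx == 1  # 0.3 is closest to level 0.25
    assert apot_to_float(idx, levels, indices) == 0.25
    assert quant_dequant_util(0.3, levels, indices) == 0.25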
|
pytorch-master
|
torch/ao/quantization/experimental/apot_utils.py
|
import torch
from torch import Tensor
from torch.ao.quantization.experimental.quantizer import quantize_APoT, dequantize_APoT
class fake_quantize_function(torch.autograd.Function):
@staticmethod
def forward(ctx, # type: ignore[override]
x: Tensor,
alpha: Tensor,
gamma: Tensor,
quantization_levels: Tensor,
level_indices: Tensor) -> Tensor:
quantized_result = quantize_APoT(x, alpha, gamma, quantization_levels, level_indices)
# calculate mask tensor
mask = x.detach().apply_(lambda x: (x <= alpha and x >= -alpha))
result = dequantize_APoT(quantized_result)
ctx.save_for_backward(mask)
return result
@staticmethod
def backward(ctx, grad_output: Tensor) -> Tensor: # type: ignore[override]
        # ctx.saved_tensors is a tuple, so unpack the saved mask; gradients flow
        # straight through to x inside the clipping range, and the qparams
        # (alpha, gamma, quantization_levels, level_indices) get no gradient
        mask, = ctx.saved_tensors
        return grad_output * mask, None, None, None, None
|
pytorch-master
|
torch/ao/quantization/experimental/fake_quantize_function.py
|
import torch
from torch.ao.quantization.experimental.quantizer import APoTQuantizer
# class to store APoT quantized tensor
class TensorAPoT():
quantizer: APoTQuantizer
data: torch.Tensor
def __init__(self, quantizer: APoTQuantizer, apot_data: torch.Tensor):
self.quantizer = quantizer
self.data = apot_data
def int_repr(self):
return self.data
|
pytorch-master
|
torch/ao/quantization/experimental/APoT_tensor.py
|
import torch
import numpy as np
from torch.nn.quantized.modules.utils import WeightedQuantizedModule
from torch.ao.quantization.experimental.observer import APoTObserver
from torch.ao.quantization.experimental.quantizer import quantize_APoT
class LinearAPoT(WeightedQuantizedModule):
r"""
A quantized linear module with quantized tensor as inputs and outputs
to support APoT quantization.
We adopt the same interface as `torch.nn.Linear`, see
https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.
Similar to :class:`~torch.nn.Linear`, attributes will be randomly
initialized at module creation time and will be overwritten later
Attributes:
alpha: `alpha` qparam of output Quantized Tensor, type: Tensor
gamma: `gamma` qparam of output Quantized Tensor, type: Tensor
quantization_levels: `quantization_levels` qparam of output Quantized Tensor, type: Tensor
level_indices: `level_indices` qparam of output Quantized Tensor, type: Tensor
weight: APoT quantized tensor from weight2quantize
weight_transposed: transposed weight tensor, used in linear transformation calculation (y = x * A^T + b)
"""
def __init__(self, weight2quantize: torch.Tensor, b: int, k: int):
assert weight2quantize.dim() == 2
assert b % k == 0
super().__init__()
self.b = b
self.k = k
self.n = self.b // self.k
observer = APoTObserver(b=self.b, k=self.k)
observer(weight2quantize)
self.alpha, self.gamma, self.quantization_levels, self.level_indices = observer.calculate_qparams(signed=False)
quantized_weight = quantize_APoT(weight2quantize, self.alpha, self.gamma, self.quantization_levels, self.level_indices)
self.weight = quantized_weight.data
self.weight_transposed = torch.transpose(self.weight, 0, 1)
def decompose_APoT(self, x):
r"""
Decompose binary representation of APoT values into list of k-sized blocks
Args:
x (Tensor): binary representation of APoT quantized tensor
"""
# remove "0b" prefix from binary representation
x = x[2:]
# initialize list of blocks
blocks = []
while x:
blocks.append(x[0:self.k])
x = x[self.k:]
return blocks
def bitshift_mul(self, weight_val, r):
r"""
Compute multiplication of weight_val * r using bitshifting
method discussed in APoT paper: https://arxiv.org/pdf/1909.13144.pdf
Args:
weight_val: list of binary digits representing APoT quantized weight value
r: int representing uniformly quantized activation value
"""
product = 0
idx = len(weight_val) - 1
place = 0
while idx >= 0:
block = weight_val[idx]
# reverse digits in block
block = block[::-1]
curr_block_result = 0
for ele in block:
if int(ele):
curr_block_result += r << place
place += 1
idx -= 1
product += curr_block_result
return product
def matmul(self, decomposed_weight, activation):
r"""
Perform matrix multiplication between decomposed_weight and
activation by calling bitshift_mul function for each value
Args:
decomposed_weight (Tensor): APoT quantized weight decomposed into binary
activation (Tensor): uniformly quantized activation
"""
rows1 = activation.size(dim=0)
cols1 = activation.size(dim=1)
rows2 = decomposed_weight.shape[0]
cols2 = decomposed_weight.shape[1]
result = torch.zeros(rows1, cols2)
# compute matrix multiplication with bitshifts
for i in range(rows1):
for j in range(cols2):
for k in range(rows2):
weight_val = decomposed_weight[k][j]
r = int(activation[i][k])
product = self.bitshift_mul(weight_val, r)
result[i][j] += product
return result
def forward(self, activation: torch.Tensor) -> torch.FloatTensor:
r"""
Multiply APoT quantized weight and uniformly quantized activation (dtype: quint8)
with bitshifting instead of matrix multiplication.
Result has dtype torch.float32
Args:
activation (Tensor): uniformly quantized activation tensor
"""
assert activation.dim() == 2
weight_rows = self.weight_transposed.size()[0]
weight_cols = self.weight_transposed.size()[1]
decomposed_weight = np.empty(shape=(weight_rows, weight_cols), dtype=object)
for row in range(weight_rows):
for col in range(weight_cols):
decomposed_weight[row][col] = self.decompose_APoT(bin(self.weight_transposed[row][col]))
result = self.matmul(decomposed_weight, activation).type(torch.FloatTensor)
return result
@classmethod
def from_reference(cls, # type: ignore[override]
ref_qlinear,
alpha: torch.Tensor,
gamma: torch.Tensor,
quantization_levels: torch.Tensor,
level_indices: torch.Tensor):
raise NotImplementedError
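# A minimal sketch for decompose_APoT/bitshift_mul above, assuming a small
# random float weight: the shift-and-add product of a decomposed integer
# weight value and an integer activation value equals ordinary multiplication.
# The `_example_*` helper name and the values 5 and 3 are illustrative only.
def _example_bitshift_mul_sketch():
    linear = LinearAPoT(torch.rand(4, 4), b=4, k=2)
    blocks = linear.decompose_APoT(bin(5))
    assert linear.bitshift_mul(blocks, 3) == 5 * 3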
|
pytorch-master
|
torch/ao/quantization/experimental/linear.py
|
import torch
from torch import Tensor
from torch.ao.quantization.experimental.observer import APoTObserver
from torch.ao.quantization.fake_quantize import FakeQuantizeBase
from torch.ao.quantization.experimental.fake_quantize_function import fake_quantize_function
class APoTFakeQuantize(FakeQuantizeBase):
alpha: Tensor
gamma: Tensor
quantization_levels: Tensor
level_indices: Tensor
def __init__(self, observer=APoTObserver, **observer_kwargs):
super().__init__()
self.activation_post_process = observer(**observer_kwargs)
self.dtype = self.activation_post_process.dtype
def calculate_qparams(self, signed=False): # type: ignore[override]
return self.activation_post_process.calculate_qparams(signed=signed)
def forward(self, X: torch.Tensor): # type: ignore[override]
if self.observer_enabled[0] == 1:
self.activation_post_process.forward(X)
result = self.activation_post_process.calculate_qparams(signed=False)
self.alpha = result[0]
self.gamma = result[1]
self.quantization_levels = result[2]
self.level_indices = result[3]
if self.fake_quant_enabled[0] == 1:
assert (self.alpha is not None
and self.gamma is not None
and self.quantization_levels is not None
and self.level_indices is not None), "Must set qparams for fake quant"
X = fake_quantize_function.apply(X, self.alpha, self.gamma, self.quantization_levels, self.level_indices)
return X
|
pytorch-master
|
torch/ao/quantization/experimental/fake_quantize.py
|
import torch
from torch.ao.quantization.qconfig import QConfig
from torch.ao.quantization import MinMaxObserver
from torch.ao.quantization.fake_quantize import FakeQuantize
from torch.ao.quantization.experimental.fake_quantize import APoTFakeQuantize
"""
Default symmetric fake_quant for activations.
"""
default_symmetric_fake_quant = FakeQuantize.with_args(observer=MinMaxObserver,
qscheme=torch.per_tensor_symmetric,
dtype=torch.quint8)
"""
Default symmetric fake_quant for weights.
"""
default_weight_symmetric_fake_quant = FakeQuantize.with_args(observer=MinMaxObserver,
qscheme=torch.per_tensor_symmetric,
dtype=torch.qint8)
# uniform activation and weight, b=8 k=2
uniform_qconfig_8bit = QConfig(activation=default_symmetric_fake_quant,
weight=default_weight_symmetric_fake_quant.with_args)
# uniform activation, APoT weight, b=8 k=2
apot_weight_qconfig_8bit = QConfig(activation=default_symmetric_fake_quant.with_args,
weight=APoTFakeQuantize.with_args(b=8, k=2, dtype=torch.qint8))
# APoT activation and uniform weight, b=8 k=2
apot_qconfig_8bit = QConfig(activation=APoTFakeQuantize.with_args(b=8, k=2, dtype=torch.quint8),
weight=APoTFakeQuantize.with_args(b=8, k=2, dtype=torch.qint8))
# uniform activation and weight, b=4 k=2
uniform_qconfig_4bit = QConfig(activation=default_symmetric_fake_quant.with_args(quant_min=0,
quant_max=15),
weight=default_weight_symmetric_fake_quant.with_args(quant_min=0,
quant_max=15))
# uniform activation, APoT weight, b=4 k=2
apot_weight_qconfig_4bit = QConfig(activation=default_symmetric_fake_quant.with_args(quant_min=0,
quant_max=15),
weight=APoTFakeQuantize.with_args(b=4, k=2, dtype=torch.qint8))
# APoT activation and uniform weight, b=4 k=2
apot_qconfig_4bit = QConfig(activation=APoTFakeQuantize.with_args(b=4, k=2, dtype=torch.quint8),
weight=APoTFakeQuantize.with_args(b=4, k=2, dtype=torch.qint8))
|
pytorch-master
|
torch/ao/quantization/experimental/qconfig.py
|
import torch
from torch import Tensor
import numpy as np
from torch.ao.quantization.experimental.apot_utils import float_to_apot, apot_to_float, quant_dequant_util
# class to store APoT quantizer and
# implement quantize and dequantize
class APoTQuantizer():
alpha: torch.Tensor
gamma: torch.Tensor
quantization_levels: torch.Tensor
level_indices: torch.Tensor
def __init__(
self,
alpha: torch.Tensor,
gamma: torch.Tensor,
quantization_levels: torch.Tensor,
level_indices: torch.Tensor) -> None:
self.alpha = alpha
self.gamma = gamma
self.quantization_levels = quantization_levels
self.level_indices = level_indices
r""" Quantizes fp Tensor to integer APoT representation.
Conversion is based on the qparams from a specified APoT non-uniform observer.
The approach follows the method outlined in the APoT paper: https://arxiv.org/pdf/1909.13144.pdf.
Args:
tensor2quantize: fp Tensor
Returns:
result: APoT Tensor representation of tensor2quantize
"""
def quantize(self, tensor2quantize: Tensor):
result = torch.tensor([])
# map float_to_apot over tensor2quantize elements
tensor2quantize = tensor2quantize.detach().apply_(lambda x: float_to_apot(x,
self.quantization_levels,
self.level_indices,
self.alpha))
# convert to APoT int representation for dtype
tensor2quantize = tensor2quantize.int()
from torch.ao.quantization.experimental.APoT_tensor import TensorAPoT
result = TensorAPoT(self, tensor2quantize)
return result
r""" Dequantizes integer Tensor to floating point (fp) representation
based on the calculated quantization levels from a specified APoT non-uniform observer.
The approach follows the method outlined in the APoT paper: https://arxiv.org/pdf/1909.13144.pdf.
Args:
        apot_tensor: APoT Tensor to dequantize
Returns:
result: fp reduced precision representation of input Tensor
"""
def dequantize(self, apot_tensor) -> Tensor:
orig_size = apot_tensor.data.size()
apot_tensor_data = apot_tensor.data.flatten()
print(apot_tensor_data)
# map apot_to_float over tensor2quantize elements
result_temp = np.empty(shape=apot_tensor_data.size())
for i in range(len(apot_tensor_data)):
new_ele = apot_to_float(apot_tensor_data[i], self.quantization_levels, self.level_indices)
result_temp[i] = new_ele
result = torch.from_numpy(result_temp).reshape(orig_size)
return result
r""" Returns result of quantize -> dequantize on a fp Tensor (reduced precision)
based on the calculated quantization levels from a specified APoT non-uniform observer.
The approach follows the method outlined in the APoT paper: https://arxiv.org/pdf/1909.13144.pdf.
Args:
        tensor2quantize: fp Tensor to quantize and dequantize
Returns:
result: fp representation of input Tensor
"""
def quant_dequant(self, tensor2quantize: Tensor) -> Tensor:
        levels_lst = list(self.quantization_levels)
        indices_lst = list(self.level_indices)
        result = tensor2quantize.apply_(lambda x: quant_dequant_util(x, levels_lst, indices_lst))
return result
def q_apot_alpha(self) -> float:
raise NotImplementedError
r""" Global method to create quantizer and call quantizer quantize_APoT
Args:
tensor2quantize: fp Tensor to quantize
alpha: Tensor qparam alpha (clipping level)
gamma: Tensor qparam gamma (scale factor for quantization levels)
quantization levels: Tensor with fp quantization levels
level indices: Tensor with integer quantization level indices
Returns:
    result: APoT Tensor representation of tensor2quantize
"""
def quantize_APoT(tensor2quantize: Tensor, alpha: Tensor, gamma: Tensor, quantization_levels: Tensor, level_indices: Tensor):
quantizer = APoTQuantizer(alpha=alpha, gamma=gamma, quantization_levels=quantization_levels, level_indices=level_indices)
result = quantizer.quantize(tensor2quantize)
return result
r""" Global method to create quantizer and call quantizer dequantize_APoT
Args:
apot_tensor: APoT Tensor to dequantize
Returns:
result: fp Tensor dequantized from apot_tensor
"""
def dequantize_APoT(apot_tensor) -> Tensor:
quantizer = apot_tensor.quantizer
result = quantizer.dequantize(apot_tensor)
return result
r""" Global method to create quantizer and call quantizer quant_dequant
Args:
tensor2quantize: fp Tensor to quantize
alpha: Tensor qparam alpha (clipping level)
gamma: Tensor qparam gamma (scale factor for quantization levels)
quantization levels: Tensor with fp quantization levels
level indices: Tensor with integer quantization level indices
Returns:
result: fp reduced precision Tensor from tensor2quantize
"""
def quant_dequant_APoT(tensor2quantize: Tensor,
alpha: Tensor,
gamma: Tensor,
quantization_levels: Tensor,
level_indices: Tensor) -> Tensor:
quantizer = APoTQuantizer(alpha=alpha, gamma=gamma, quantization_levels=quantization_levels, level_indices=level_indices)
result = quantizer.quant_dequant(tensor2quantize)
return result
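# A minimal sketch for quantize_APoT/dequantize_APoT above: derive qparams from
# an APoTObserver, quantize a random tensor to APoT integers, then dequantize
# back to a reduced-precision fp tensor. The `_example_*` helper name and the
# tensor shape are illustrative only.
def _example_quantize_dequantize_APoT():
    # local import to keep this sketch self-contained
    from torch.ao.quantization.experimental.observer import APoTObserver
    x = torch.rand(4, 4)
    observer = APoTObserver(b=4, k=2)
    observer(x)
    alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False)
    x_apot = quantize_APoT(x, alpha, gamma, quantization_levels, level_indices)
    x_dq = dequantize_APoT(x_apot)
    assert x_dq.shape == x.shape
    return x_dq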
|
pytorch-master
|
torch/ao/quantization/experimental/quantizer.py
|
import operator
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.qat as nniqat
import torch.nn.qat as nnqat
import torch.nn.quantized._reference as nnqr
from collections import namedtuple
from typing import List
from .observation_type import ObservationType
from .backend_config import BackendPatternConfig, DTypeConfig
from ..fuser_method_mappings import (
reverse_sequential_wrapper2,
reverse2,
reverse3,
fuse_conv_bn,
fuse_conv_bn_relu,
fuse_linear_bn,
fuse_convtranspose_bn,
)
# TODO: rename to be more explicit, e.g. qat_conv_relu
_ConvMetadata = namedtuple(
"_ConvMetadata",
["root", "transpose", "bn", "reference", "transpose_reference",
"fused_conv_relu", "fused_conv_bn", "fused_conv_bn_relu",
"qat", "relu_qat", "bn_qat", "bn_relu_qat",
"func"])
_Conv1dMetadata = _ConvMetadata(
nn.Conv1d, nn.ConvTranspose1d, nn.BatchNorm1d, nnqr.Conv1d, nnqr.ConvTranspose1d,
nni.ConvReLU1d, nni.ConvBn1d, nni.ConvBnReLU1d,
nnqat.Conv1d, nniqat.ConvReLU1d, nniqat.ConvBn1d, nniqat.ConvBnReLU1d,
F.conv1d)
_Conv2dMetadata = _ConvMetadata(
nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d, nnqr.Conv2d, nnqr.ConvTranspose2d,
nni.ConvReLU2d, nni.ConvBn2d, nni.ConvBnReLU2d,
nnqat.Conv2d, nniqat.ConvReLU2d, nniqat.ConvBn2d, nniqat.ConvBnReLU2d,
F.conv2d)
_Conv3dMetadata = _ConvMetadata(
nn.Conv3d, nn.ConvTranspose3d, nn.BatchNorm3d, nnqr.Conv3d, nnqr.ConvTranspose3d,
nni.ConvReLU3d, nni.ConvBn3d, nni.ConvBnReLU3d,
nnqat.Conv3d, nniqat.ConvReLU3d, nniqat.ConvBn3d, nniqat.ConvBnReLU3d,
F.conv3d)
def _get_binary_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
binary_op_configs: List[BackendPatternConfig] = []
num_tensor_args_to_observation_type_mapping = {
# TODO: this is not used right now since we have extra check in prepare
# will need to change this to NO_OBSERVER later after we implemented
# Tensor dtype inference properly
0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT,
2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
}
for op_with_quantized_bop_scalar_variant in [operator.add, torch.add, operator.mul, torch.mul]:
bop_patterns = [
(torch.nn.ReLU, op_with_quantized_bop_scalar_variant),
(torch.nn.functional.relu, op_with_quantized_bop_scalar_variant),
(torch.relu, op_with_quantized_bop_scalar_variant),
op_with_quantized_bop_scalar_variant
]
for bop_pattern in bop_patterns:
binary_op_configs.append(
BackendPatternConfig(bop_pattern)
.set_dtype_configs(dtype_configs) # noqa: E131
._set_num_tensor_args_to_observation_type(num_tensor_args_to_observation_type_mapping))
return binary_op_configs
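# A minimal sketch for _get_binary_op_configs above: each of the 4 binary ops
# (operator.add, torch.add, operator.mul, torch.mul) gets 4 pattern variants
# (the bare op plus 3 relu-fused patterns). The `_example_*` helper name is
# illustrative only; passing an empty dtype config list keeps the sketch small.
def _example_binary_op_configs_sketch():
    configs = _get_binary_op_configs([])
    assert len(configs) == 4 * 4
    return configs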
def _get_linear_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
"""
Return all configs related to linear modules and ops.
"""
observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
linear_configs: List[BackendPatternConfig] = []
# (1) Single linear modules/functions
# -------------------------------------
# linear module
linear_configs.append(
BackendPatternConfig(torch.nn.Linear)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(torch.nn.Linear)
.set_reference_quantized_module(nnqr.Linear)
.set_qat_module(nnqat.Linear))
# linear qat module
linear_configs.append(
BackendPatternConfig(nnqat.Linear)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(torch.nn.Linear)
.set_reference_quantized_module(nnqr.Linear))
# functional linear
linear_configs.append(
BackendPatternConfig(torch.nn.functional.linear)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
._set_input_type_to_index({"weight": 1, "bias": 2}))
# (2) Linear + relu
# -------------------
# 2.1 linear module + relu fusion config
# linear relu, linear module + relu module
linear_configs.append(
BackendPatternConfig((torch.nn.ReLU, torch.nn.Linear))
.set_dtype_configs(dtype_configs) # noqa: E131
.set_fuser_method(reverse_sequential_wrapper2(nni.LinearReLU))
.set_fused_module(nni.LinearReLU))
# linear relu, linear module + functional relu
linear_configs.append(
BackendPatternConfig((torch.nn.functional.relu, torch.nn.Linear))
.set_dtype_configs(dtype_configs) # noqa: E131
.set_fuser_method(reverse_sequential_wrapper2(nni.LinearReLU))
.set_fused_module(nni.LinearReLU))
# 2.2 linear module + relu, fused module configs
# linear relu, fused module
linear_configs.append(
BackendPatternConfig(nni.LinearReLU)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(torch.nn.Linear)
.set_reference_quantized_module(nnqr.Linear)
.set_qat_module(nniqat.LinearReLU))
# linear relu, qat fused module
linear_configs.append(
BackendPatternConfig(nniqat.LinearReLU)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(torch.nn.Linear)
.set_reference_quantized_module(nnqr.Linear))
# 2.3 functional linear + relu configs
# linear relu, functional linear + relu module
linear_configs.append(
BackendPatternConfig((torch.nn.ReLU, F.linear))
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs))
# linear relu, functional linear + functional relu
linear_configs.append(
BackendPatternConfig((F.relu, F.linear))
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs))
# (3) Linear + batchnorm
# ------------------------
# 3.1 linear bn fusion
linear_configs.append(
BackendPatternConfig((nn.BatchNorm1d, nn.Linear))
.set_dtype_configs(dtype_configs) # noqa: E131
.set_fuser_method(reverse2(fuse_linear_bn))
.set_fused_module(nni.LinearBn1d))
# 3.2 linear bn fused
# linear bn, fused module
linear_configs.append(
BackendPatternConfig(nni.LinearBn1d)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(torch.nn.Linear)
.set_reference_quantized_module(nnqr.Linear)
.set_qat_module(nniqat.LinearBn1d))
# linear bn, qat fused module
linear_configs.append(
BackendPatternConfig(nniqat.LinearBn1d)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(torch.nn.Linear)
.set_reference_quantized_module(nnqr.Linear))
return linear_configs
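# Illustrative sketch: a hypothetical helper that pulls out just the fusion patterns from the
# linear configs, e.g. (torch.nn.ReLU, torch.nn.Linear), which reads "nn.Linear followed by
# nn.ReLU" in the reversed pattern format used by these configs.
def _example_list_linear_fusion_patterns(dtype_configs):
    return [
        config.pattern
        for config in _get_linear_configs(dtype_configs)
        if config.fused_module is not None
    ]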
def _get_conv_configs(dtype_configs):
"""
Return all configs related to conv modules and ops.
"""
conv_configs = []
observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
for convs in [_Conv1dMetadata, _Conv2dMetadata, _Conv3dMetadata]:
# (1) Single conv modules/functions
# -----------------------------------
# conv module
conv_configs.append(
BackendPatternConfig(convs.root)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(convs.root)
.set_reference_quantized_module(convs.reference)
.set_qat_module(convs.qat))
# conv qat module
conv_configs.append(
BackendPatternConfig(convs.qat)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(convs.root)
.set_reference_quantized_module(convs.reference))
# functional conv
conv_configs.append(
BackendPatternConfig(convs.func)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
._set_input_type_to_index({"weight": 1, "bias": 2}))
# (2) Conv + relu
# -----------------
# 2.1 conv module + relu fusion configs
# conv relu fusion, conv module + relu module
conv_configs.append(
BackendPatternConfig((torch.nn.ReLU, convs.root))
.set_dtype_configs(dtype_configs) # noqa: E131
.set_fuser_method(reverse_sequential_wrapper2(convs.fused_conv_relu))
.set_fused_module(convs.fused_conv_relu))
# conv relu fusion, conv module + functional relu
conv_configs.append(
BackendPatternConfig((F.relu, convs.root))
.set_dtype_configs(dtype_configs) # noqa: E131
.set_fuser_method(reverse_sequential_wrapper2(convs.fused_conv_relu))
.set_fused_module(convs.fused_conv_relu))
# 2.2 conv module + relu fused module configs
# conv relu, fused module
conv_configs.append(
BackendPatternConfig(convs.fused_conv_relu)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(convs.root)
.set_reference_quantized_module(convs.reference)
.set_qat_module(convs.relu_qat))
# conv relu, qat fused module
conv_configs.append(
BackendPatternConfig(convs.relu_qat)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(convs.root)
.set_reference_quantized_module(convs.reference))
# 2.3 functional conv + relu configs
# conv relu, functional conv + relu module
conv_configs.append(
BackendPatternConfig((torch.nn.ReLU, convs.func))
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs))
# conv relu, functional conv + functional relu
conv_configs.append(
BackendPatternConfig((F.relu, convs.func))
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs))
# fused conv relu
conv_configs.append(
BackendPatternConfig(convs.fused_conv_relu)
.set_dtype_configs(dtype_configs) # noqa: E131
.set_qat_module(convs.relu_qat))
conv_configs.append(
BackendPatternConfig(convs.relu_qat)
.set_dtype_configs(dtype_configs) # noqa: E131
.set_root_module(convs.root)
.set_reference_quantized_module(convs.reference))
# (3) Conv + batchnorm (+ relu)
# -------------------------------
# 3.1 conv bn fusion configs
# conv + bn fusion
conv_configs.append(
BackendPatternConfig((convs.bn, convs.root))
.set_dtype_configs(dtype_configs) # noqa: E131
.set_fuser_method(reverse2(fuse_conv_bn))
.set_fused_module(convs.fused_conv_bn))
# conv + bn + relu module fusion
conv_configs.append(
BackendPatternConfig((nn.ReLU, (convs.bn, convs.root)))
.set_dtype_configs(dtype_configs) # noqa: E131
.set_fuser_method(reverse3(fuse_conv_bn_relu))
.set_fused_module(convs.fused_conv_bn_relu))
# conv + bn + relu functional fusion
conv_configs.append(
BackendPatternConfig((F.relu, (convs.bn, convs.root)))
.set_dtype_configs(dtype_configs) # noqa: E131
.set_root_module(convs.root)
.set_fuser_method(reverse3(fuse_conv_bn_relu))
.set_fused_module(convs.fused_conv_bn_relu))
# TODO: we can add fusion for torch.relu as well
# 3.2 conv + bn (+ relu) fused module configs
# fused conv bn
conv_configs.append(
BackendPatternConfig(convs.fused_conv_bn)
.set_dtype_configs(dtype_configs) # noqa: E131
.set_qat_module(convs.bn_qat))
# fused conv bn relu
conv_configs.append(
BackendPatternConfig(convs.fused_conv_bn_relu)
.set_dtype_configs(dtype_configs) # noqa: E131
.set_qat_module(convs.bn_relu_qat))
# conv bn, qat fused module
conv_configs.append(
BackendPatternConfig(convs.bn_qat)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(convs.root)
.set_reference_quantized_module(convs.reference))
# conv bn relu, qat fused module
conv_configs.append(
BackendPatternConfig(convs.bn_relu_qat)
.set_observation_type(observation_type) # noqa: E131
.set_dtype_configs(dtype_configs)
.set_root_module(convs.root)
.set_reference_quantized_module(convs.reference))
# (4) conv transpose and its fusion
# 4.1 conv transpose config
conv_configs.append(
BackendPatternConfig(convs.transpose)
.set_dtype_configs(dtype_configs) # noqa: E131
.set_root_module(convs.transpose)
.set_reference_quantized_module(convs.transpose_reference))
# 4.2 conv transpose + bn fusion
conv_configs.append(
BackendPatternConfig((convs.bn, convs.transpose))
.set_dtype_configs(dtype_configs) # noqa: E131
.set_fuser_method(reverse2(fuse_convtranspose_bn))
.set_root_module(convs.transpose)
.set_reference_quantized_module(convs.transpose_reference))
return conv_configs
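# Illustrative sketch: a hypothetical helper that locates the 2d conv + bn + relu fusion among
# the configs above. The pattern is the reversed nested tuple (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d)),
# i.e. conv -> bn -> relu in the graph; `nni` refers to torch.nn.intrinsic as imported at the top
# of this file.
def _example_find_conv2d_bn_relu_fusion_pattern(dtype_configs):
    for config in _get_conv_configs(dtype_configs):
        if config.fused_module is nni.ConvBnReLU2d and config.fuser_method is not None:
            return config.pattern
    return None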
def _get_share_qparams_op_configs(dtype_configs):
""" Get the operator config for the operators that works for both float and quantized input
if input is quantized, the output Tensor shares the same quantization parameter
with input.
Example operator: avgpool2d, reshape, transpose, maxpool2d
Example observed operator:
observer_0 - avgpool2d - observer_0 (same observer instance as input)
"""
    def _get_share_qparams_op_backend_config(op):
return BackendPatternConfig(op) \
.set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
.set_dtype_configs(dtype_configs)
share_qparams_ops = [
torch.nn.AdaptiveAvgPool1d,
torch.nn.AdaptiveAvgPool2d,
torch.nn.AdaptiveAvgPool3d,
torch.nn.AvgPool1d,
torch.nn.AvgPool2d,
torch.nn.AvgPool3d,
torch.nn.Hardtanh,
torch.nn.Identity,
torch.nn.MaxPool1d,
torch.nn.MaxPool2d,
torch.nn.MaxPool3d,
torch.nn.ReLU,
torch.nn.ReLU6,
torch.adaptive_avg_pool1d,
torch.nn.functional.adaptive_avg_pool2d,
torch.nn.functional.adaptive_avg_pool3d,
torch.nn.functional.hardtanh,
torch.nn.functional.hardtanh_,
torch.nn.functional.interpolate,
torch.nn.functional.max_pool1d,
torch.nn.functional.max_pool2d,
torch.nn.functional.max_pool3d,
torch.nn.functional.relu,
torch.nn.functional.relu6,
torch.avg_pool1d,
torch._C._nn.avg_pool2d,
torch._C._nn.avg_pool3d,
torch.clamp,
torch.flatten,
torch.mean,
torch.repeat_interleave,
torch.transpose,
torch.squeeze,
torch.stack,
torch.unsqueeze,
operator.floordiv,
"contiguous",
"clamp",
"detach",
"detach_",
"mean",
"permute",
"repeat",
"repeat_interleave",
"reshape",
"resize_",
"relu",
"relu_",
"shape",
"size",
"squeeze",
"squeeze_",
"transpose",
"unsqueeze",
"unsqueeze_",
"view"
]
    return [_get_share_qparams_op_backend_config(op) for op in share_qparams_ops]
__all__ = [
"_get_binary_op_configs",
"_get_linear_configs",
"_get_conv_configs",
"_get_share_qparams_op_configs",
]
|
pytorch-master
|
torch/ao/quantization/backend_config/_common_operator_config_utils.py
|
from .backend_config import BackendConfig, BackendPatternConfig, DTypeConfig
from .native import get_native_backend_config, get_native_backend_config_dict
from .observation_type import ObservationType
from .tensorrt import get_tensorrt_backend_config, get_tensorrt_backend_config_dict
__all__ = [
"get_native_backend_config",
"get_native_backend_config_dict",
"get_tensorrt_backend_config",
"get_tensorrt_backend_config_dict",
]
|
pytorch-master
|
torch/ao/quantization/backend_config/__init__.py
|
from typing import List
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.qat as nnqat
import torch.nn.quantized._reference as nnqr
from ._common_operator_config_utils import (
_get_binary_op_configs,
_get_linear_configs,
_get_conv_configs,
_get_share_qparams_op_configs,
)
from .backend_config import BackendConfig, BackendPatternConfig, DTypeConfig
from .observation_type import ObservationType
from ..fake_quantize import FixedQParamsFakeQuantize
from ..fuser_method_mappings import (
reverse_sequential_wrapper2,
)
from ..qconfig_mapping import _FIXED_QPARAMS_OP_TO_OBSERVER
from ..utils import Pattern
# ===================
# | DTYPE CONFIGS |
# ===================
# weighted op int8 dtype config
# this is the config for ops that have quantized weights, like linear and conv
weighted_op_int8_dtype_config = DTypeConfig(
input_dtype=torch.quint8,
output_dtype=torch.quint8,
weight_dtype=torch.qint8,
bias_dtype=torch.float,
)
default_op_quint8_dtype_config = DTypeConfig(
input_dtype=torch.quint8,
output_dtype=torch.quint8,
)
default_op_fp16_dtype_config = DTypeConfig(
input_dtype=torch.float16,
output_dtype=torch.float16,
weight_dtype=torch.float16,
bias_dtype=torch.float16,
)
default_dynamic_int8_dtype_config = DTypeConfig(
input_dtype=torch.quint8,
output_dtype=torch.float,
weight_dtype=torch.qint8,
bias_dtype=torch.float,
    # currently the dtype check is not yet enabled, so we provide the dtype_configs here,
    # but they are not really used yet;
    # we will enable the check a bit later after we have moved everything to backend_config_dict
is_dynamic=True,
)
default_dynamic_float16_dtype_config = DTypeConfig(
input_dtype=torch.float16,
output_dtype=torch.float,
weight_dtype=torch.float16,
bias_dtype=torch.float,
    # currently the dtype check is not yet enabled, so we provide the dtype_configs here,
    # but they are not really used yet;
    # we will enable the check a bit later after we have moved everything to backend_config_dict
is_dynamic=True,
)
weight_only_quint8_dtype_config = DTypeConfig(
input_dtype=torch.float,
output_dtype=torch.float,
weight_dtype=torch.quint8,
)
weight_only_quint4x2_dtype_config = DTypeConfig(
input_dtype=torch.float,
output_dtype=torch.float,
weight_dtype=torch.quint4x2,
)
# ======================
# | OPERATOR CONFIGS |
# ======================
def _get_default_op_backend_config(op: Pattern, dtype_configs: List[DTypeConfig]) -> BackendPatternConfig:
return BackendPatternConfig(op) \
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
.set_dtype_configs(dtype_configs)
_DEFAULT_OP_INT8_CONFIGS: List[BackendPatternConfig] = [
_get_default_op_backend_config(op, [default_op_quint8_dtype_config]) for op in [
torch.nn.ELU,
torch.nn.LeakyReLU,
torch.nn.Hardswish,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.Dropout,
torch.nn.PReLU,
torch.nn.functional.elu,
torch.nn.functional.hardswish,
torch.nn.functional.instance_norm,
torch.nn.functional.leaky_relu,
torch.nn.functional.dropout,
torch.nn.functional.layer_norm,
]]
def _get_fixed_qparams_op_configs(dtype_configs: List[DTypeConfig]) -> List[BackendPatternConfig]:
fixed_qparams_op_configs = []
for fixed_qparam_op, output_observer in _FIXED_QPARAMS_OP_TO_OBSERVER.items():
fixed_qparams_op_configs.append(
            # TODO: The _overwrite_output keys are temporary, since we don't want to put observers
            # in the configs; we expect them to be provided by the user.
            # What we want to put here is the requirement on observers (in this case dtype,
            # quant_min, quant_max etc.), but we need to first move all configs to
            # backend_config_dict to do that. We'll remove these keys after we have fully
            # migrated everything to use backend_config_dict.
BackendPatternConfig(fixed_qparam_op)
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
.set_dtype_configs(dtype_configs)
._set_overwrite_output_fake_quantize(FixedQParamsFakeQuantize.with_args(observer=output_observer))
._set_overwrite_output_observer(output_observer))
return fixed_qparams_op_configs
_CAT_CONFIG = BackendPatternConfig(torch.cat) \
.set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
.add_dtype_config(default_op_quint8_dtype_config)
def _get_bn_configs() -> List[BackendPatternConfig]:
""" Get configs related to batchnorm
"""
bn_configs = []
bn_to_fused_bn = {
torch.nn.BatchNorm2d: nni.BNReLU2d,
torch.nn.BatchNorm3d: nni.BNReLU3d,
}
for bn in bn_to_fused_bn.keys():
fused_bn = bn_to_fused_bn[bn]
# bn module + relu module fusion config
bn_configs.append(
BackendPatternConfig((torch.nn.ReLU, bn))
.add_dtype_config(default_op_quint8_dtype_config) # noqa: E131
.set_fuser_method(reverse_sequential_wrapper2(fused_bn))
.set_fused_module(fused_bn))
# bn module + F.relu fusion config
bn_configs.append(
BackendPatternConfig((torch.nn.functional.relu, bn))
.add_dtype_config(default_op_quint8_dtype_config) # noqa: E131
.set_fuser_method(reverse_sequential_wrapper2(bn_to_fused_bn[bn]))
.set_fused_module(fused_bn))
bn_configs.append(
BackendPatternConfig(bn)
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
.add_dtype_config(default_op_quint8_dtype_config))
# fused bn configs
for fused_bn in bn_to_fused_bn.values():
bn_configs.append(
BackendPatternConfig(fused_bn)
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
.add_dtype_config(default_op_quint8_dtype_config))
return bn_configs
def _get_rnn_op_configs() -> List[BackendPatternConfig]:
rnn_op_configs = []
for rnn_op, ref_rnn_op in [
(nn.GRUCell, nnqr.GRUCell),
(nn.LSTMCell, nnqr.LSTMCell),
(nn.RNNCell, nnqr.RNNCell),
(nn.LSTM, nnqr.LSTM)
]:
rnn_op_configs.append(
BackendPatternConfig(rnn_op)
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
.add_dtype_config(default_dynamic_int8_dtype_config)
.add_dtype_config(default_dynamic_float16_dtype_config)
.set_root_module(rnn_op)
.set_reference_quantized_module(ref_rnn_op))
return rnn_op_configs
def _get_embedding_op_configs() -> List[BackendPatternConfig]:
embedding_op_configs = []
for embedding_op, qat_embedding_op, ref_embedding_op in [
(nn.Embedding, nnqat.Embedding, nnqr.Embedding),
(nn.EmbeddingBag, nnqat.EmbeddingBag, nnqr.EmbeddingBag),
]:
embedding_op_configs.append(
BackendPatternConfig(embedding_op)
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
.add_dtype_config(weight_only_quint8_dtype_config)
.add_dtype_config(weight_only_quint4x2_dtype_config)
.set_qat_module(qat_embedding_op)
.set_root_module(embedding_op)
.set_reference_quantized_module(ref_embedding_op)
._set_input_output_observed(False)) # This is temporary, and will be removed soon
# config for qat op
embedding_op_configs.append(
BackendPatternConfig(qat_embedding_op)
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) # noqa: E131
.add_dtype_config(weight_only_quint8_dtype_config)
.add_dtype_config(weight_only_quint4x2_dtype_config)
.set_root_module(embedding_op)
.set_reference_quantized_module(ref_embedding_op)
._set_input_output_observed(False)) # This is temporary, and will be removed soon
return embedding_op_configs
def get_test_only_legacy_native_backend_config() -> BackendConfig:
"""
Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) with various additional fp16 ops.
"""
conv_dtype_configs = [weighted_op_int8_dtype_config]
linear_dtype_configs = [
weighted_op_int8_dtype_config,
default_dynamic_int8_dtype_config,
default_dynamic_float16_dtype_config,
default_op_fp16_dtype_config,
]
binary_op_dtype_configs = [
weighted_op_int8_dtype_config,
default_op_fp16_dtype_config,
]
share_qparams_op_dtype_configs = [
default_op_quint8_dtype_config,
default_op_fp16_dtype_config
]
fixed_qparams_op_dtype_configs = [
weighted_op_int8_dtype_config,
default_op_fp16_dtype_config,
]
return BackendConfig("_native_and_fp16") \
.set_backend_pattern_configs(_DEFAULT_OP_INT8_CONFIGS) \
.set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
.set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
.set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
.set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
.set_backend_pattern_config(_CAT_CONFIG) \
.set_backend_pattern_configs(_get_bn_configs()) \
.set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
.set_backend_pattern_configs(_get_rnn_op_configs()) \
.set_backend_pattern_configs(_get_embedding_op_configs())
def get_native_backend_config() -> BackendConfig:
"""
Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack).
"""
conv_dtype_configs = [weighted_op_int8_dtype_config]
linear_dtype_configs = [
weighted_op_int8_dtype_config,
default_dynamic_int8_dtype_config,
default_dynamic_float16_dtype_config,
]
binary_op_dtype_configs = [
weighted_op_int8_dtype_config,
]
share_qparams_op_dtype_configs = [
default_op_quint8_dtype_config,
]
fixed_qparams_op_dtype_configs = [
weighted_op_int8_dtype_config,
]
return BackendConfig("native") \
.set_backend_pattern_configs(_DEFAULT_OP_INT8_CONFIGS) \
.set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
.set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
.set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
.set_backend_pattern_configs(_get_fixed_qparams_op_configs(fixed_qparams_op_dtype_configs)) \
.set_backend_pattern_config(_CAT_CONFIG) \
.set_backend_pattern_configs(_get_bn_configs()) \
.set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs)) \
.set_backend_pattern_configs(_get_rnn_op_configs()) \
.set_backend_pattern_configs(_get_embedding_op_configs())
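# Illustrative sketch: one way this config is typically consumed, assuming the FX graph mode
# entry points (prepare_fx / convert_to_reference_fx) accept a `backend_config` keyword argument
# as in recent versions of torch.ao.quantization.quantize_fx; treat this as a usage sketch rather
# than a supported API contract.
def _example_quantize_linear_relu_with_native_backend_config():
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU()).eval()
    example_inputs = (torch.randn(1, 4),)
    backend_config = get_native_backend_config()
    prepared = prepare_fx(
        model, get_default_qconfig_mapping("fbgemm"), example_inputs,
        backend_config=backend_config)
    prepared(*example_inputs)  # calibrate with representative data
    return convert_to_reference_fx(prepared, backend_config=backend_config)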
def get_native_backend_config_dict():
"""
Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) in dictionary form.
"""
return get_native_backend_config().to_dict()
def get_test_only_legacy_native_backend_config_dict():
"""
Return the `BackendConfig` for PyTorch Native backend (fbgemm/qnnpack) with various additional
fp16 ops in dictionary form.
"""
return get_test_only_legacy_native_backend_config().to_dict()
__all__ = [
"get_test_only_legacy_native_backend_config",
"get_test_only_legacy_native_backend_config_dict",
"get_native_backend_config",
"get_native_backend_config_dict",
]
|
pytorch-master
|
torch/ao/quantization/backend_config/native.py
|
from enum import Enum
__all__ = ['ObservationType']
class ObservationType(Enum):
# this means input and output are observed with different observers, based
# on qconfig.activation
# example: conv, linear, softmax
OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT = 0
# this means the output will use the same observer instance as input, based
# on qconfig.activation
# example: torch.cat, maxpool
OUTPUT_SHARE_OBSERVER_WITH_INPUT = 1
|
pytorch-master
|
torch/ao/quantization/backend_config/observation_type.py
|
from typing import Dict, Any, List, Callable, Union, Tuple, Type
import torch
import torch.nn as nn
import torch.nn.functional as F
from .backend_config import BackendConfig, DTypeConfig
from ..quantization_types import Pattern
def get_pattern_to_dtype_configs(backend_config: BackendConfig) -> Dict[Pattern, List[DTypeConfig]]:
pattern_to_dtype_configs: Dict[Pattern, List[DTypeConfig]] = dict()
for pattern, config in backend_config.configs.items():
pattern_to_dtype_configs[pattern] = config.dtype_configs
return pattern_to_dtype_configs
def get_qat_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]:
qat_module_classes = []
for config in backend_config.configs.values():
if config.qat_module is not None:
qat_module_classes.append(config.qat_module)
return tuple(set(qat_module_classes))
def get_fused_module_classes(backend_config: BackendConfig) -> Tuple[type, ...]:
fused_module_classes = []
for config in backend_config.configs.values():
if config.fused_module is not None:
fused_module_classes.append(config.fused_module)
return tuple(set(fused_module_classes))
def get_pattern_to_input_type_to_index(backend_config: BackendConfig) -> Dict[Pattern, Dict[str, int]]:
pattern_to_input_type_to_index: Dict[Pattern, Dict[str, int]] = dict()
for pattern, config in backend_config.configs.items():
pattern_to_input_type_to_index[pattern] = config._input_type_to_index
return pattern_to_input_type_to_index
def get_root_module_to_quantized_reference_module(
backend_config: BackendConfig) -> Dict[Type[torch.nn.Module], Type[torch.nn.Module]]:
mapping: Dict[Type[torch.nn.Module], Type[torch.nn.Module]] = dict()
for config in backend_config.configs.values():
if config.root_module is not None and config.reference_quantized_module is not None:
mapping[config.root_module] = config.reference_quantized_module
return mapping
def get_fuser_method_mapping(backend_config: BackendConfig) -> Dict[Pattern, Union[nn.Sequential, Callable]]:
fuser_method_mapping : Dict[Pattern, Union[nn.Sequential, Callable]] = dict()
for pattern, config in backend_config.configs.items():
if config.fuser_method is not None:
fuser_method_mapping[pattern] = config.fuser_method
return fuser_method_mapping
def get_module_to_qat_module(backend_config: BackendConfig) -> Dict[Pattern, Type[torch.nn.Module]]:
module_to_qat_module: Dict[Pattern, Type[torch.nn.Module]] = dict()
for pattern, config in backend_config.configs.items():
if config.qat_module is not None:
module_to_qat_module[pattern] = config.qat_module
return module_to_qat_module
def get_fusion_pattern_to_root_node_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]:
""" Get a map from fusion pattern to a function that returns the root node
from the fusion pattern, e.g. the most common one is:
def get_root_node(node_pattern):
while not isinstance(node_pattern[-1], Node):
node_pattern = node_pattern[-1]
return node_pattern[-1]
This can work for all patterns whose root node is the "last node" in the pattern,
    e.g. (torch.add, MatchAllNode, (torch.nn.ReLU, torch.nn.Conv2d))
"""
root_node_getter_mapping: Dict[Pattern, Callable] = dict()
for pattern, config in backend_config.configs.items():
if config._root_node_getter is not None:
root_node_getter_mapping[pattern] = config._root_node_getter
return root_node_getter_mapping
def get_fusion_pattern_to_extra_inputs_getter(backend_config: BackendConfig) -> Dict[Pattern, Callable]:
""" Get a map from fusion pattern to a function that returns extra input nodes
from the fusion pattern, in the order required by the root node. This is optional,
if not specified, we will not copy over any extra inputs for the root node.
Example:
# Let's say we have the pattern (torch.add, MatchAllNode, (torch.nn.BatchNorm2d, torch.nn.Conv2d))
# and root node is torch.nn.Conv2d, and the node in MatchAllNode would be an extra
# argument to the fused module, we can unpack the pattern and return the node at
# MatchAllNode here
# we can implement extra_inputs_getter as follows:
def extra_inputs_getter(pattern) -> List[Any]:
add, extra_input, conv_pattern = pattern
return [extra_input]
"""
extra_inputs_getter_mapping: Dict[Pattern, Callable] = dict()
for pattern, config in backend_config.configs.items():
if config._extra_inputs_getter is not None:
extra_inputs_getter_mapping[pattern] = config._extra_inputs_getter
return extra_inputs_getter_mapping
def remove_boolean_dispatch_from_name(p) -> Any:
"""
Some ops have a default string representation such as
'<function boolean_dispatch.<locals>.fn at 0x7ff1106bf280>',
this function replaces them with the hardcoded function names.
"""
if p is F.fractional_max_pool2d:
return "torch.nn.functional.fractional_max_pool2d"
elif p is F.fractional_max_pool3d:
return "torch.nn.functional.fractional_max_pool3d"
elif p is F.max_pool1d:
return "torch.nn.functional.max_pool1d"
elif p is F.max_pool2d:
return "torch.nn.functional.max_pool2d"
elif p is F.max_pool3d:
return "torch.nn.functional.max_pool3d"
elif p is F.adaptive_max_pool1d:
return "torch.nn.functional.adaptive_max_pool1d"
elif p is F.adaptive_max_pool2d:
return "torch.nn.functional.adaptive_max_pool2d"
elif p is F.adaptive_max_pool3d:
return "torch.nn.functional.adaptive_max_pool3d"
assert "boolean_dispatch" not in str(p), \
f"{p} does not have a human readable representation in " + \
"quantization documentation"
return p
def pattern_to_human_readable(p) -> Any:
if isinstance(p, tuple):
# nested patterns, recurse
return tuple(pattern_to_human_readable(inner_p) for inner_p in p)
elif isinstance(p, str):
# method names are already human readable
return p
else:
p = remove_boolean_dispatch_from_name(p)
return p
# TODO(future PR): move backend_config_dict to use dataclass and move this logic to
# the corresponding __str__ function
def entry_to_pretty_str(entry) -> str:
"""
Given a backend_config_dict entry, returns a string with the human readable
representation of it.
"""
s = "{\n"
# always output the pattern first
if "pattern" in entry:
pattern_str = pattern_to_human_readable(entry["pattern"])
s += f" 'pattern': {pattern_str},\n"
# custom output for dtype_configs to make it look nice
if "dtype_configs" in entry:
s += " 'dtype_configs': [\n"
for dtype_config in entry["dtype_configs"]:
s += " {\n"
for k, v in dtype_config.items():
s += f" '{k}': {v},\n"
s += " },\n"
s += " ],\n"
# custom output for num_tensor_args_to_observation_type to make it look nice
if "num_tensor_args_to_observation_type" in entry:
s += " 'num_tensor_args_to_observation_type': {\n"
for k, v in entry["num_tensor_args_to_observation_type"].items():
s += f" {k}: {v},\n"
s += " },\n"
# output all the other fields
custom_handled_fields = [
"pattern",
"dtype_configs",
"num_tensor_args_to_observation_type",
]
for field_name in entry:
if field_name in custom_handled_fields:
continue
s += f" '{field_name}': {entry[field_name]},\n"
s += "}"
return s
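# Illustrative sketch: a hypothetical helper combining the utilities in this file.
# get_native_backend_config is assumed to be importable from torch.ao.quantization.backend_config,
# as exported by that package's __init__.
def _example_print_native_backend_patterns():
    from torch.ao.quantization.backend_config import get_native_backend_config
    pattern_to_dtype_configs = get_pattern_to_dtype_configs(get_native_backend_config())
    for pattern, dtype_configs in pattern_to_dtype_configs.items():
        # pattern_to_human_readable strips the noisy boolean_dispatch repr from ops
        # like max_pool2d so the output is stable and readable
        print(pattern_to_human_readable(pattern), len(dtype_configs))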
|
pytorch-master
|
torch/ao/quantization/backend_config/utils.py
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Type
import torch
from torch.ao.quantization.backend_config.observation_type import ObservationType
from torch.ao.quantization.observer import _PartialWrapper
from torch.ao.quantization.utils import Pattern
__all__ = [
"BackendConfig",
"BackendPatternConfig",
"DTypeConfig",
]
# DTypeConfig dict keys
INPUT_DTYPE_DICT_KEY = "input_dtype"
OUTPUT_DTYPE_DICT_KEY = "output_dtype"
WEIGHT_DTYPE_DICT_KEY = "weight_dtype"
BIAS_DTYPE_DICT_KEY = "bias_dtype"
IS_DYNAMIC_DICT_KEY = "is_dynamic"
# BackendConfig dict keys
NAME_DICT_KEY = "name"
CONFIGS_DICT_KEY = "configs"
# BackendPatternConfig dict keys
PATTERN_DICT_KEY = "pattern"
OBSERVATION_TYPE_DICT_KEY = "observation_type"
DTYPE_CONFIGS_DICT_KEY = "dtype_configs"
ROOT_MODULE_DICT_KEY = "root_module"
QAT_MODULE_DICT_KEY = "qat_module"
REFERENCE_QUANTIZED_MODULE_DICT_KEY = "reference_quantized_module_for_root"
FUSED_MODULE_DICT_KEY = "fused_module"
FUSER_METHOD_DICT_KEY = "fuser_method"
ROOT_NODE_GETTER_DICT_KEY = "root_node_getter"
EXTRA_INPUTS_GETTER_DICT_KEY = "extra_inputs_getter"
NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY = "num_tensor_args_to_observation_type"
INPUT_TYPE_TO_INDEX_DICT_KEY = "input_type_to_index"
INPUT_OUTPUT_OBSERVED_DICT_KEY = "input_output_observed"
OVERWRITE_OUTPUT_FAKE_QUANTIZE_DICT_KEY = "overwrite_output_fake_quantize"
OVERWRITE_OUTPUT_OBSERVER_DICT_KEY = "overwrite_output_observer"
@dataclass
class DTypeConfig:
"""
Config for the set of supported input/output activation, weight, and bias data types for the
patterns defined in :class:`~torch.ao.quantization.backend_config.BackendConfig`.
"""
input_dtype: Optional[torch.dtype] = None
output_dtype: Optional[torch.dtype] = None
weight_dtype: Optional[torch.dtype] = None
bias_dtype: Optional[torch.dtype] = None
is_dynamic: Optional[bool] = None
@classmethod
def from_dict(cls, dtype_config_dict: Dict[str, Any]) -> DTypeConfig:
"""
Create a `DTypeConfig` from a dictionary with the following items (all optional):
"input_dtype": torch.dtype
"output_dtype": torch.dtype
"weight_dtype": torch.dtype
"bias_type": torch.dtype
"is_dynamic": bool
"""
input_dtype = dtype_config_dict.get(INPUT_DTYPE_DICT_KEY, None)
output_dtype = dtype_config_dict.get(OUTPUT_DTYPE_DICT_KEY, None)
weight_dtype = dtype_config_dict.get(WEIGHT_DTYPE_DICT_KEY, None)
bias_dtype = dtype_config_dict.get(BIAS_DTYPE_DICT_KEY, None)
is_dynamic = dtype_config_dict.get(IS_DYNAMIC_DICT_KEY, None)
return cls(input_dtype, output_dtype, weight_dtype, bias_dtype, is_dynamic)
def to_dict(self) -> Dict[str, Any]:
"""
Convert this `DTypeConfig` to a dictionary with the items described in
:func:`~torch.ao.quantization.backend_config.DTypeConfig.from_dict`.
"""
dtype_config_dict: Dict[str, Any] = {}
if self.input_dtype is not None:
dtype_config_dict[INPUT_DTYPE_DICT_KEY] = self.input_dtype
if self.output_dtype is not None:
dtype_config_dict[OUTPUT_DTYPE_DICT_KEY] = self.output_dtype
if self.weight_dtype is not None:
dtype_config_dict[WEIGHT_DTYPE_DICT_KEY] = self.weight_dtype
if self.bias_dtype is not None:
dtype_config_dict[BIAS_DTYPE_DICT_KEY] = self.bias_dtype
if self.is_dynamic is not None:
dtype_config_dict[IS_DYNAMIC_DICT_KEY] = self.is_dynamic
return dtype_config_dict
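# Illustrative sketch: a hypothetical helper showing that DTypeConfig round trips through its
# dictionary form, which is what the older backend_config_dict format uses. The specific dtypes
# below are just example values.
def _example_dtype_config_roundtrip():
    original = {
        "input_dtype": torch.quint8,
        "output_dtype": torch.quint8,
        "weight_dtype": torch.qint8,
        "bias_dtype": torch.float,
    }
    dtype_config = DTypeConfig.from_dict(original)
    assert dtype_config.to_dict() == original
    return dtype_config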
class BackendConfig:
# TODO: refer to NativeBackendConfig once that is implemented
"""
Config that defines the set of patterns that can be quantized on a given backend, and how reference
quantized models can be produced from these patterns.
A pattern in this context refers to a module, a functional, an operator, or a directed acyclic graph
of the above. Each pattern supported on the target backend can be individually configured through
:class:`~torch.ao.quantization.backend_config.BackendPatternConfig` in terms of:
(1) The supported input/output activation, weight, and bias data types
(2) How observers and quant/dequant ops are inserted in order to construct the reference pattern, and
(3) (Optionally) Fusion, QAT, and reference module mappings.
The format of the patterns is described in:
https://github.com/pytorch/pytorch/blob/master/torch/ao/quantization/backend_config/README.md
Example usage::
import torch
from torch.ao.quantization.backend_config import BackendConfig, BackendPatternConfig, DTypeConfig, ObservationType
from torch.ao.quantization.fuser_method_mappings import reverse_sequential_wrapper2
weighted_int8_dtype_config = DTypeConfig(
input_dtype=torch.quint8,
output_dtype=torch.quint8,
weight_dtype=torch.qint8,
            bias_dtype=torch.float)
linear_config = BackendPatternConfig(torch.nn.Linear) \
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
.add_dtype_config(weighted_int8_dtype_config) \
.set_root_module(torch.nn.Linear) \
.set_qat_module(torch.nn.qat.Linear) \
.set_reference_quantized_module(torch.nn.quantized._reference.Linear)
conv_relu_config = BackendPatternConfig((torch.nn.ReLU, torch.nn.Conv2d)) \
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
.add_dtype_config(weighted_int8_dtype_config) \
.set_fused_module(torch.nn.intrinsic.ConvReLU2d) \
.set_fuser_method(reverse_sequential_wrapper2(torch.nn.intrinsic.ConvReLU2d))
backend_config = BackendConfig("my_backend") \
.set_backend_pattern_config(linear_config) \
.set_backend_pattern_config(conv_relu_config)
"""
def __init__(self, name: str = ""):
self.name = name
self.configs: Dict[Pattern, BackendPatternConfig] = {}
def set_name(self, name: str) -> BackendConfig:
"""
Set the name of the target backend.
"""
self.name = name
return self
def set_backend_pattern_config(self, config: BackendPatternConfig) -> BackendConfig:
"""
        Set the config for a pattern that can be run on the target backend.
This overrides any existing config for the given pattern.
"""
self.configs[config.pattern] = config
return self
def set_backend_pattern_configs(self, configs: List[BackendPatternConfig]) -> BackendConfig:
"""
Set the configs for patterns that can be run on the target backend.
        This overrides any existing config for a given pattern that was previously registered.
"""
for conf in configs:
self.set_backend_pattern_config(conf)
return self
@classmethod
def from_dict(cls, backend_config_dict: Dict[str, Any]) -> BackendConfig:
"""
Create a `BackendConfig` from a dictionary with the following items:
"name": the name of the target backend
"configs": a list of dictionaries that each represents a `BackendPatternConfig`
"""
conf = cls(backend_config_dict.get(NAME_DICT_KEY, ""))
for d in backend_config_dict.get(CONFIGS_DICT_KEY, []):
if isinstance(d, BackendPatternConfig):
conf.set_backend_pattern_config(d)
elif isinstance(d, Dict):
conf.set_backend_pattern_config(BackendPatternConfig.from_dict(d))
else:
raise ValueError("Expected backend_config_dict['%s'] to be a dictionary" % CONFIGS_DICT_KEY)
return conf
def to_dict(self) -> Dict[str, Any]:
"""
Convert this `BackendConfig` to a dictionary with the items described in
:func:`~torch.ao.quantization.backend_config.BackendConfig.from_dict`.
"""
return {
NAME_DICT_KEY: self.name,
CONFIGS_DICT_KEY: [c.to_dict() for c in self.configs.values()],
}
class BackendPatternConfig:
"""
Config for ops defined in :class:`~torch.ao.quantization.backend_config.BackendConfig`.
    The user can configure how an operator pattern graph is handled on a given backend using the following methods:
`set_observation_type`: sets how observers should be inserted for this pattern.
See :class:`~torch.ao.quantization.backend_config.ObservationType`
`add_dtype_config`: add a set of supported data types for this pattern
`set_root_module`: sets the module that represents the root for this pattern
`set_qat_module`: sets the module that represents the QAT implementation for this pattern
`set_reference_quantized_module`: sets the module that represents the reference quantized
implementation for this pattern's root module.
`set_fused_module`: sets the module that represents the fused implementation for this pattern
    `set_fuser_method`: sets the function that specifies how to fuse this pattern
For a detailed example usage, see :class:`~torch.ao.quantization.backend_config.BackendConfig`.
"""
def __init__(self, pattern: Pattern):
self.pattern = pattern
self.observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
self.dtype_configs: List[DTypeConfig] = []
self.root_module: Optional[Type[torch.nn.Module]] = None
self.qat_module: Optional[Type[torch.nn.Module]] = None
self.reference_quantized_module: Optional[Type[torch.nn.Module]] = None
self.fused_module: Optional[Type[torch.nn.Module]] = None
self.fuser_method: Optional[Callable] = None
# Temporary/internal configs
self._root_node_getter: Optional[Callable] = None
self._extra_inputs_getter: Optional[Callable] = None
self._num_tensor_args_to_observation_type: Dict[int, ObservationType] = {}
self._input_type_to_index: Dict[str, int] = {}
self._input_output_observed: Optional[bool] = None
self._overwrite_output_fake_quantize: Optional[_PartialWrapper] = None
self._overwrite_output_observer: Optional[_PartialWrapper] = None
def set_observation_type(self, observation_type: ObservationType) -> BackendPatternConfig:
"""
Set how observers should be inserted for this pattern.
"""
self.observation_type = observation_type
return self
def add_dtype_config(self, dtype_config: DTypeConfig) -> BackendPatternConfig:
"""
Register a set of supported input/output activation, weight, and bias data types for this pattern.
"""
self.dtype_configs.append(dtype_config)
return self
def set_dtype_configs(self, dtype_configs: List[DTypeConfig]) -> BackendPatternConfig:
"""
Set the supported input/output activation, weight, and bias data types for this pattern,
overriding all previously registered data types.
"""
self.dtype_configs = dtype_configs
return self
def set_root_module(self, root_module: Type[torch.nn.Module]) -> BackendPatternConfig:
"""
Set the module that represents the root for this pattern.
For example, the root module for :class:`torch.nn.intrinsic.LinearReLU` should be :class:`torch.nn.Linear`.
"""
self.root_module = root_module
return self
def set_qat_module(self, qat_module: Type[torch.nn.Module]) -> BackendPatternConfig:
"""
Set the module that represents the QAT implementation for this pattern.
"""
self.qat_module = qat_module
return self
def set_reference_quantized_module(self, reference_quantized_module: Type[torch.nn.Module]) -> BackendPatternConfig:
"""
Set the module that represents the reference quantized implementation for this pattern's root module.
"""
self.reference_quantized_module = reference_quantized_module
return self
def set_fused_module(self, fused_module: Type[torch.nn.Module]) -> BackendPatternConfig:
"""
Set the module that represents the fused implementation for this pattern.
"""
self.fused_module = fused_module
return self
def set_fuser_method(self, fuser_method: Callable) -> BackendPatternConfig:
"""
        Set the function that specifies how to fuse this pattern.
"""
self.fuser_method = fuser_method
return self
def _set_root_node_getter(self, root_node_getter: Callable) -> BackendPatternConfig:
self._root_node_getter = root_node_getter
return self
def _set_extra_inputs_getter(self, extra_inputs_getter: Callable) -> BackendPatternConfig:
self._extra_inputs_getter = extra_inputs_getter
return self
def _set_num_tensor_args_to_observation_type(
self, num_tensor_args_to_observation_type: Dict[int, ObservationType]) -> BackendPatternConfig:
self._num_tensor_args_to_observation_type = num_tensor_args_to_observation_type
return self
def _set_input_type_to_index(self, input_type_to_index: Dict[str, int]) -> BackendPatternConfig:
self._input_type_to_index = input_type_to_index
return self
def _set_input_output_observed(self, input_output_observed: bool) -> BackendPatternConfig:
self._input_output_observed = input_output_observed
return self
def _set_overwrite_output_fake_quantize(self, overwrite_output_fake_quantize: _PartialWrapper) -> BackendPatternConfig:
self._overwrite_output_fake_quantize = overwrite_output_fake_quantize
return self
def _set_overwrite_output_observer(self, overwrite_output_observer: _PartialWrapper) -> BackendPatternConfig:
self._overwrite_output_observer = overwrite_output_observer
return self
@classmethod
def from_dict(cls, backend_pattern_config_dict: Dict[str, Any]) -> BackendPatternConfig:
"""
Create a `BackendPatternConfig` from a dictionary with the following items:
"pattern": the pattern being configured
"observation_type": the :class:`~torch.ao.quantization.backend_config.ObservationType` that specifies how
observers should be inserted for this pattern
"dtype_configs": a list of dictionaries that represents :class:`~torch.ao.quantization.backend_config.DTypeConfig`s
"root_module": a :class:`torch.nn.Module` that represents the root for this pattern
"qat_module": a :class:`torch.nn.Module` that represents the QAT implementation for this pattern
"reference_quantized_module": a :class:`torch.nn.Module` that represents the reference quantized
implementation for this pattern's root module.
"fused_module": a :class:`torch.nn.Module` that represents the fused implementation for this pattern
"fuser_method": a function that specifies how to fuse the pattern for this pattern
"""
def _get_dtype_config(obj: Any) -> DTypeConfig:
"""
Convert the given object into a `DTypeConfig` if possible, else throw an exception.
"""
if isinstance(obj, DTypeConfig):
return obj
if isinstance(obj, Dict):
return DTypeConfig.from_dict(obj)
raise ValueError("Expected a list of DTypeConfigs in backend_pattern_config_dict[\"%s\"], got '%s'" %
(DTYPE_CONFIGS_DICT_KEY, type(obj)))
if PATTERN_DICT_KEY not in backend_pattern_config_dict:
raise ValueError("backend_pattern_config_dict must contain '%s'" % PATTERN_DICT_KEY)
conf = cls(backend_pattern_config_dict[PATTERN_DICT_KEY])
if OBSERVATION_TYPE_DICT_KEY in backend_pattern_config_dict:
conf.set_observation_type(backend_pattern_config_dict[OBSERVATION_TYPE_DICT_KEY])
for d in backend_pattern_config_dict.get(DTYPE_CONFIGS_DICT_KEY, []):
conf.add_dtype_config(_get_dtype_config(d))
conf.set_root_module(backend_pattern_config_dict.get(ROOT_MODULE_DICT_KEY, None))
conf.set_qat_module(backend_pattern_config_dict.get(QAT_MODULE_DICT_KEY, None))
conf.set_reference_quantized_module(backend_pattern_config_dict.get(REFERENCE_QUANTIZED_MODULE_DICT_KEY, None))
conf.set_fused_module(backend_pattern_config_dict.get(FUSED_MODULE_DICT_KEY, None))
conf.set_fuser_method(backend_pattern_config_dict.get(FUSER_METHOD_DICT_KEY, None))
conf._set_root_node_getter(backend_pattern_config_dict.get(ROOT_NODE_GETTER_DICT_KEY, None))
conf._set_extra_inputs_getter(backend_pattern_config_dict.get(EXTRA_INPUTS_GETTER_DICT_KEY, None))
conf._set_num_tensor_args_to_observation_type(
backend_pattern_config_dict.get(NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY, {}))
conf._set_input_type_to_index(backend_pattern_config_dict.get(INPUT_TYPE_TO_INDEX_DICT_KEY, {}))
conf._set_input_output_observed(backend_pattern_config_dict.get(INPUT_OUTPUT_OBSERVED_DICT_KEY, None))
conf._set_overwrite_output_fake_quantize(backend_pattern_config_dict.get(OVERWRITE_OUTPUT_FAKE_QUANTIZE_DICT_KEY, None))
conf._set_overwrite_output_observer(backend_pattern_config_dict.get(OVERWRITE_OUTPUT_OBSERVER_DICT_KEY, None))
return conf
def to_dict(self) -> Dict[str, Any]:
"""
Convert this `BackendPatternConfig` to a dictionary with the items described in
:func:`~torch.ao.quantization.backend_config.BackendPatternConfig.from_dict`.
"""
backend_pattern_config_dict: Dict[str, Any] = {
PATTERN_DICT_KEY: self.pattern,
OBSERVATION_TYPE_DICT_KEY: self.observation_type,
DTYPE_CONFIGS_DICT_KEY: [c.to_dict() for c in self.dtype_configs],
}
if self.root_module is not None:
backend_pattern_config_dict[ROOT_MODULE_DICT_KEY] = self.root_module
if self.qat_module is not None:
backend_pattern_config_dict[QAT_MODULE_DICT_KEY] = self.qat_module
if self.reference_quantized_module is not None:
backend_pattern_config_dict[REFERENCE_QUANTIZED_MODULE_DICT_KEY] = self.reference_quantized_module
if self.fused_module is not None:
backend_pattern_config_dict[FUSED_MODULE_DICT_KEY] = self.fused_module
if self.fuser_method is not None:
backend_pattern_config_dict[FUSER_METHOD_DICT_KEY] = self.fuser_method
if self._root_node_getter is not None:
backend_pattern_config_dict[ROOT_NODE_GETTER_DICT_KEY] = self._root_node_getter
if self._extra_inputs_getter is not None:
backend_pattern_config_dict[EXTRA_INPUTS_GETTER_DICT_KEY] = self._extra_inputs_getter
if len(self._num_tensor_args_to_observation_type) > 0:
backend_pattern_config_dict[NUM_TENSOR_ARGS_TO_OBSERVATION_TYPE_DICT_KEY] = self._num_tensor_args_to_observation_type
if len(self._input_type_to_index) > 0:
backend_pattern_config_dict[INPUT_TYPE_TO_INDEX_DICT_KEY] = self._input_type_to_index
if self._input_output_observed is not None:
backend_pattern_config_dict[INPUT_OUTPUT_OBSERVED_DICT_KEY] = self._input_output_observed
if self._overwrite_output_fake_quantize is not None:
backend_pattern_config_dict[OVERWRITE_OUTPUT_FAKE_QUANTIZE_DICT_KEY] = self._overwrite_output_fake_quantize
if self._overwrite_output_observer is not None:
backend_pattern_config_dict[OVERWRITE_OUTPUT_OBSERVER_DICT_KEY] = self._overwrite_output_observer
return backend_pattern_config_dict
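# Illustrative sketch: a hypothetical helper showing the dictionary form of a BackendPatternConfig.
# Only the "pattern" key is required; anything omitted falls back to the defaults set in __init__.
def _example_backend_pattern_config_from_dict():
    config = BackendPatternConfig.from_dict({
        "pattern": torch.nn.Linear,
        "observation_type": ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
        "dtype_configs": [{"input_dtype": torch.quint8, "output_dtype": torch.quint8}],
        "root_module": torch.nn.Linear,
    })
    return config.to_dict()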
|
pytorch-master
|
torch/ao/quantization/backend_config/backend_config.py
|
import torch
from .backend_config import BackendConfig, BackendPatternConfig, DTypeConfig
from .observation_type import ObservationType
from ._common_operator_config_utils import (
_get_binary_op_configs,
_get_linear_configs,
_get_conv_configs,
_get_share_qparams_op_configs,
)
def get_tensorrt_backend_config() -> BackendConfig:
"""
Return the `BackendConfig` for the TensorRT backend.
    NOTE: The current API will change in the future; it only exists to unblock experimentation for
    new backends, so please don't use it right now.
TODO: add a README when it's more stable
"""
# dtype configs
weighted_op_qint8_dtype_config = DTypeConfig(
input_dtype=torch.qint8,
output_dtype=torch.qint8,
weight_dtype=torch.qint8,
bias_dtype=torch.float,
)
non_weighted_op_qint8_dtype_config = DTypeConfig(
input_dtype=torch.qint8,
output_dtype=torch.qint8,
)
addmm_config = BackendPatternConfig(torch.addmm) \
.set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
.add_dtype_config(weighted_op_qint8_dtype_config) \
._set_input_type_to_index({
"bias": 0,
"input": 1,
"weight": 2,
})
cat_config = BackendPatternConfig(torch.cat) \
.set_observation_type(ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT) \
.add_dtype_config(non_weighted_op_qint8_dtype_config)
conv_dtype_configs = [
weighted_op_qint8_dtype_config,
]
linear_dtype_configs = [
weighted_op_qint8_dtype_config,
]
binary_op_dtype_configs = [
weighted_op_qint8_dtype_config,
]
share_qparams_op_dtype_configs = [
non_weighted_op_qint8_dtype_config,
]
# there might be things not supported in fx2trt, but it will error out
# during fx2trt conversion and can support them after that
return BackendConfig("tensorrt") \
.set_backend_pattern_configs(_get_conv_configs(conv_dtype_configs)) \
.set_backend_pattern_config(addmm_config) \
.set_backend_pattern_config(cat_config) \
.set_backend_pattern_configs(_get_linear_configs(linear_dtype_configs)) \
.set_backend_pattern_configs(_get_binary_op_configs(binary_op_dtype_configs)) \
.set_backend_pattern_configs(_get_share_qparams_op_configs(share_qparams_op_dtype_configs))
def get_tensorrt_backend_config_dict():
"""
Return the `BackendConfig` for the TensorRT backend in dictionary form.
"""
return get_tensorrt_backend_config().to_dict()
__all__ = [
"get_tensorrt_backend_config",
"get_tensorrt_backend_config_dict",
]
|
pytorch-master
|
torch/ao/quantization/backend_config/tensorrt.py
|
import torch
import copy
from torch.fx import GraphModule
from torch.fx.graph import Graph
from typing import Union, Dict, Any, Set
class FusedGraphModule(GraphModule):
def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]):
self.preserved_attr_names = preserved_attr_names
preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)}
super().__init__(root, graph)
for attr in preserved_attrs:
setattr(self, attr, preserved_attrs[attr])
# GraphModule does not copy attributes which are not in the __dict__
# of vanilla nn.Module. So, we override __deepcopy__ in order
# to copy the quantization specific attributes correctly.
def __deepcopy__(self, memo):
fake_mod = torch.nn.Module()
fake_mod.__dict__ = copy.deepcopy(self.__dict__)
return FusedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names))
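# Illustrative sketch: a hypothetical helper demonstrating that attributes named in
# preserved_attr_names survive a deepcopy, which is the purpose of the __deepcopy__ override
# above. "_my_custom_attr" is a made-up attribute used purely for illustration.
def _example_fused_graph_module_deepcopy():
    from torch.fx import symbolic_trace
    root = torch.nn.Linear(2, 2)
    root._my_custom_attr = {"note": "kept across deepcopy"}
    fused = FusedGraphModule(root, symbolic_trace(root).graph, {"_my_custom_attr"})
    return copy.deepcopy(fused)._my_custom_attr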
class ObservedGraphModule(GraphModule):
def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]):
self.preserved_attr_names = set([
'_activation_post_process_map',
'_activation_post_process_indexes',
'_patterns',
'_qconfig_map',
'_prepare_custom_config',
'_equalization_qconfig_map',
'_node_name_to_scope',
'_qconfig_mapping',
'_is_qat',
'_observed_node_names']).union(preserved_attr_names)
preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)}
super().__init__(root, graph)
for attr in preserved_attrs:
setattr(self, attr, preserved_attrs[attr])
# GraphModule does not copy attributes which are not in the __dict__
# of vanilla nn.Module. So, we override __deepcopy__ in order
# to copy the quantization specific attributes correctly.
def __deepcopy__(self, memo):
fake_mod = torch.nn.Module()
fake_mod.__dict__ = copy.deepcopy(self.__dict__)
return ObservedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names))
def is_observed_module(module: Any) -> bool:
return isinstance(module, ObservedGraphModule)
class ObservedStandaloneGraphModule(ObservedGraphModule):
def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]):
preserved_attr_names = preserved_attr_names.union(set([
"_standalone_module_input_quantized_idxs",
"_standalone_module_output_quantized_idxs"]))
super().__init__(root, graph, preserved_attr_names)
def __deepcopy__(self, memo):
fake_mod = torch.nn.Module()
fake_mod.__dict__ = copy.deepcopy(self.__dict__)
return ObservedStandaloneGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names))
def is_observed_standalone_module(module: Any) -> bool:
return isinstance(module, ObservedStandaloneGraphModule)
def _save_packed_weight(self, destination, prefix, keep_vars):
for attr_name in dir(self):
if "_packed_weight" in attr_name and \
isinstance(getattr(self, attr_name), torch._C.ScriptObject): # type: ignore[attr-defined]
packed_weight = getattr(self, attr_name)
destination[prefix + attr_name] = packed_weight
class QuantizedGraphModule(GraphModule):
""" This class is created to make sure PackedParams
(e.g. LinearPackedParams, Conv2dPackedParams) to appear in state_dict
so that we can serialize and deserialize quantized graph module with
torch.save(m.state_dict()) and m.load_state_dict(state_dict)
"""
def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph, preserved_attr_names: Set[str]):
self.preserved_attr_names = preserved_attr_names
preserved_attrs = {attr: getattr(root, attr) for attr in self.preserved_attr_names if hasattr(root, attr)}
super().__init__(root, graph)
for attr in preserved_attrs:
setattr(self, attr, preserved_attrs[attr])
self._register_state_dict_hook(_save_packed_weight)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
attrs_to_pop = []
for attr_name in state_dict:
if attr_name.startswith("_packed_weight") and isinstance(state_dict[attr_name], torch._C.ScriptObject): # type: ignore[attr-defined] # noqa: B950
setattr(self, attr_name, state_dict[attr_name])
attrs_to_pop.append(attr_name)
        # pop the packed param attributes
for attr_name in attrs_to_pop:
state_dict.pop(attr_name)
super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
def __deepcopy__(self, memo):
fake_mod = torch.nn.Module()
fake_mod.__dict__ = copy.deepcopy(self.__dict__)
return QuantizedGraphModule(fake_mod, copy.deepcopy(self.graph), copy.deepcopy(self.preserved_attr_names))
|
pytorch-master
|
torch/ao/quantization/fx/graph_module.py
|
import torch
from torch.fx.graph import Node, Graph
from ..utils import _parent_name
from torch.ao.quantization.quantization_types import NodePattern, Pattern
from ..fuser_method_mappings import get_fuser_method_new
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Optional, Union, List
from .custom_config import FuseCustomConfig
from .match_utils import MatchAllNode
from torch.nn.utils.parametrize import type_before_parametrizations
__all__ = [
"DefaultFuseHandler",
"FuseHandler",
]
# ----------------------------
# Fusion Pattern Registrations
# ----------------------------
# Base Pattern Handler
class FuseHandler(ABC):
""" Base handler class for the fusion patterns
"""
def __init__(self, node: Node):
pass
@abstractmethod
def fuse(self,
load_arg: Callable,
named_modules: Dict[str, torch.nn.Module],
fused_graph: Graph,
root_node: Node,
extra_inputs: List[Any],
matched_node_pattern: NodePattern,
fuse_custom_config: FuseCustomConfig,
fuser_method_mapping: Optional[Dict[Pattern, Union[torch.nn.Sequential, Callable]]],
is_qat: bool) -> Node:
pass
# TODO: move this to backend_config.fuse_handler
class DefaultFuseHandler(FuseHandler):
def __init__(
self,
node: Node):
super().__init__(node)
def fuse(self,
load_arg: Callable,
named_modules: Dict[str, torch.nn.Module],
fused_graph: Graph,
root_node: Node,
extra_inputs: List[Any],
matched_node_pattern: NodePattern,
fuse_custom_config: FuseCustomConfig,
fuser_method_mapping: Optional[Dict[Pattern, Union[torch.nn.Sequential, Callable]]],
is_qat: bool) -> Node:
assert root_node.op == "call_module", "Expecting module node to be a call_module Node"
root_module = named_modules[str(root_node.target)]
def get_modules(pattern):
""" Given a node pattern, extract the corresponding modules
e.g. input: (relu_node, (bn_node, conv_node))
output: (relu_module, (bn_module, conv_module))
"""
if isinstance(pattern, (tuple, list)):
n, *args = pattern
modules: List[torch.nn.Module] = []
modules.append(get_modules(n))
for a in args:
modules.append(get_modules(a))
return tuple(modules)
else:
n = pattern
if n.op == "call_module":
return named_modules[n.target]
elif n.op == "call_function" and n.target == torch.nn.functional.relu:
relu = torch.nn.ReLU()
relu.training = root_module.training
return relu
elif n.op == "call_function" or n.op == "call_method":
return n.target
else:
return MatchAllNode
# since relu can be used multiple times, we'll need to create a relu module for each match
matched_modules = get_modules(matched_node_pattern)
def get_matched_types(m):
if isinstance(m, tuple):
return tuple(map(get_matched_types, m))
if isinstance(m, torch.nn.Module):
return type_before_parametrizations(m)
return m
matched_module_types = get_matched_types(matched_modules)
module_parent_name, module_name = _parent_name(root_node.target)
fuser_method = get_fuser_method_new(matched_module_types, fuser_method_mapping)
# TODO: change the signature for fuser_method to take matched module patterns
# as input
fused_module = fuser_method(is_qat, *matched_modules)
setattr(named_modules[module_parent_name], module_name, fused_module)
extra_args = []
for input in extra_inputs:
extra_args.append(load_arg(input))
node = fused_graph.node_copy(root_node, load_arg)
args = list(node.args)
args.extend(extra_args)
node.args = tuple(args)
return node
|
pytorch-master
|
torch/ao/quantization/fx/fusion_patterns.py
|
import torch
from collections import defaultdict, OrderedDict
from typing import Callable, Any, Dict, Tuple, Set, List
from torch.ao.quantization import QConfig
from torch.ao.quantization.qconfig import add_module_to_qconfig_obs_ctr, QConfigAny, qconfig_equals
from torch.ao.quantization.quantize import (
is_activation_post_process,
)
from torch.ao.quantization.backend_config import (
DTypeConfig,
)
from torch.fx import (
GraphModule,
)
from torch.fx.graph import (
Graph,
)
from torch.nn.intrinsic import _FusedModule
from ..utils import (
_parent_name,
get_qconfig_dtypes,
)
from ..qconfig_mapping import (
OBJECT_TYPE_DICT_KEY,
MODULE_NAME_DICT_KEY,
MODULE_NAME_REGEX_DICT_KEY,
QConfigMapping,
)
from ..qconfig_mapping_utils import (
get_object_type_qconfig,
maybe_adjust_qconfig_for_module_type_or_name,
)
# TODO: revisit this list. Many helper methods shouldn't be public
__all__ = [
"check_is_valid_config_dict",
"compare_prepare_convert_qconfig_mappings",
"generate_qconfig_map",
"is_qconfig_supported_by_dtype_configs",
"maybe_adjust_qconfig_for_module_name_object_type_order",
"update_qconfig_for_fusion",
]
def maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_mapping: QConfigMapping,
cur_module_path: str,
cur_object_type: Callable,
cur_object_type_idx: int,
fallback_qconfig: QConfigAny,
) -> QConfigAny:
for (module_name, object_type, index), qconfig in qconfig_mapping.module_name_object_type_order_qconfigs.items():
if (
(module_name == cur_module_path) and
(object_type == cur_object_type) and
(index == cur_object_type_idx)
):
return qconfig
return fallback_qconfig
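# Editor-added illustrative sketch (not part of the original module): the module
# path "sub" and the call-site index 1 below are made-up values, used only to show
# how the helper above resolves a per-call-site qconfig.
def _example_maybe_adjust_qconfig_for_module_name_object_type_order():
    from torch.ao.quantization import get_default_qconfig
    qconfig = get_default_qconfig("fbgemm")
    # The second F.linear call inside submodule "sub" gets `qconfig`; everything
    # else keeps the fallback passed by the caller.
    qconfig_mapping = QConfigMapping().set_module_name_object_type_order(
        "sub", torch.nn.functional.linear, 1, qconfig)
    resolved = maybe_adjust_qconfig_for_module_name_object_type_order(
        qconfig_mapping, "sub", torch.nn.functional.linear, 1, fallback_qconfig=None)
    assert resolved is qconfig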
def update_qconfig_for_fusion(model: GraphModule, qconfig_mapping: QConfigMapping):
"""
Update the QConfigMapping to account for fused modules such as LinearReLU.
This assumes the QConfigMapping's attributes have already been converted to OrderedDicts.
"""
object_type_dict = qconfig_mapping.object_type_qconfigs
if len(object_type_dict) == 0:
return qconfig_mapping
modules = dict(model.named_modules())
for node in model.graph.nodes:
if node.op == 'call_module' and node.target in modules:
maybe_fused_module = modules[str(node.target)]
if not isinstance(maybe_fused_module, _FusedModule):
continue
ops = list(maybe_fused_module._modules.values())
fused_qconfig = object_type_dict.get(type(ops[0]), None)
# Raise an error if the modules in the fused module have
# different qconfigs specified in the qconfig_dict
# TODO: currently it only works for modules,
# need to make this work for torch.nn.functional.relu
# TODO: currently it only works for object_type configurations,
# ideally it should work for different types of configurations,
# maybe we want to redesign this part
for op in ops[1:]:
if not qconfig_equals(object_type_dict.get(type(op), None), fused_qconfig):
raise LookupError(
"During fusion, we need to specify the same " +
f"qconfigs for all module types in {type(maybe_fused_module)} " +
f"offending type: {type(op)}")
if fused_qconfig is not None:
object_type_dict[type(maybe_fused_module)] = fused_qconfig
def generate_qconfig_map(
root: torch.nn.Module,
modules: Dict[str, torch.nn.Module],
input_graph: Graph,
qconfig_mapping: QConfigMapping,
node_name_to_scope: Dict[str, Tuple[str, type]]) -> Dict[str, QConfigAny]:
global_qconfig = qconfig_mapping.global_qconfig
qconfig_map = dict()
# example:
#
# {'foo.bar': {F.linear: 0, F.conv2d: 1, ...}, ...}
#
# meaning in submodule 'foo.bar', we have seen 0 F.linear and
# 1 F.conv2d invocations so far.
submodule_to_object_type_to_cur_idx: Dict[str, Dict[Callable, int]] = \
defaultdict(lambda: defaultdict(int))
for node in input_graph.nodes:
qconfig = None
if node.op == "get_attr":
module_name, _ = _parent_name(node.target)
qconfig = maybe_adjust_qconfig_for_module_type_or_name(
qconfig_mapping, type(modules[module_name]), module_name, global_qconfig)
qconfig_with_device_check = add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None))
elif node.op == "call_function":
# precedence: module_name_qconfig > function_qconfig > global_qconfig
# (module_name takes precedence over the function qconfig)
function_qconfig = get_object_type_qconfig(
qconfig_mapping, node.target, global_qconfig)
module_path, module_type = node_name_to_scope[node.name]
qconfig = maybe_adjust_qconfig_for_module_type_or_name(
qconfig_mapping, module_type, module_path, function_qconfig)
cur_object_type_idx = \
submodule_to_object_type_to_cur_idx[module_path][node.target]
submodule_to_object_type_to_cur_idx[module_path][node.target] += 1
qconfig = maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_mapping, module_path, node.target, cur_object_type_idx, qconfig)
qconfig_with_device_check = add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None))
elif node.op == "call_method":
module_path, module_type = node_name_to_scope[node.name]
# first use node.target (string) to get the qconfig
# this is to support configs like
# "object_type": [("reshpe", qconfig)]
qconfig = maybe_adjust_qconfig_for_module_type_or_name(
qconfig_mapping, node.target, module_path, global_qconfig)
# if there is no special config for the method, we'll fall back to the
# config for the module that contains the call_method node
qconfig = maybe_adjust_qconfig_for_module_type_or_name(
qconfig_mapping, module_type, module_path, qconfig)
# currently call_method does not support modifying qconfig
# by order, we can add this later if it is needed.
qconfig_with_device_check = add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None))
elif node.op == 'call_module':
# if the node is an observer, just continue - don't add it to the qconfig_map
if is_activation_post_process(modules[node.target]):
continue
qconfig = maybe_adjust_qconfig_for_module_type_or_name(
qconfig_mapping, type(modules[node.target]), node.target, global_qconfig)
module_path, module_type = node_name_to_scope[node.name]
# Note: for call_module, the module_path is the current module's name.
# to meaningfully count invocations, we need to count them in the parent
# module.
parent_name, _ = _parent_name(module_path)
cur_object_type_idx = \
submodule_to_object_type_to_cur_idx[parent_name][module_type]
submodule_to_object_type_to_cur_idx[parent_name][module_type] += 1
qconfig = maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_mapping, parent_name, module_type, cur_object_type_idx,
qconfig)
qconfig_with_device_check = add_module_to_qconfig_obs_ctr(qconfig, modules.get(node.target, None))
# regex is not supported in eager mode propagate_qconfig_, so we
# need to set the qconfig explicitly here in case a regex
# config is used
modules[node.target].qconfig = qconfig_with_device_check
else:
qconfig_with_device_check = None
qconfig_map[node.name] = qconfig_with_device_check
return qconfig_map
def check_is_valid_config_dict(config_dict: Any, allowed_keys: Set[str], dict_name: str) -> None:
r""" Checks if the given config_dict has the correct keys
Args:
`config_dict`: dictionary whose keys we want to check
"""
for k in config_dict.keys():
if k not in allowed_keys:
raise ValueError(
'Expected ' + dict_name + ' to have the following keys: ' +
str(allowed_keys) + '. But found \'' + k +
'\' instead.')
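# Editor-added illustrative sketch (not part of the original module): a minimal
# usage example for the key validation helper above; the key names are made up.
def _example_check_is_valid_config_dict():
    allowed_keys = {"object_type", "module_name"}
    # A dict with only allowed keys passes silently.
    check_is_valid_config_dict({"module_name": []}, allowed_keys, "qconfig_dict")
    # An unexpected key raises a ValueError naming the offending key.
    try:
        check_is_valid_config_dict({"typo_key": []}, allowed_keys, "qconfig_dict")
    except ValueError as e:
        print(e)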
def compare_prepare_convert_qconfig_mappings(
prepare_qconfig_mapping: QConfigMapping,
convert_qconfig_mapping: QConfigMapping):
r""" Compare the qconfig_mapping passed in convert to the one from prepare and check the values
Args:
`prepare_qconfig_mapping`: configuration for prepare quantization step
`convert_qconfig_mapping`: configuration for convert quantization step
"""
assert qconfig_equals(prepare_qconfig_mapping.global_qconfig, convert_qconfig_mapping.global_qconfig), \
"Expected global qconfigs to be the same in the prepare and convert quantization configs"
prepare_dicts: List[OrderedDict] = [
prepare_qconfig_mapping.object_type_qconfigs,
prepare_qconfig_mapping.module_name_qconfigs,
prepare_qconfig_mapping.module_name_regex_qconfigs,
]
convert_dicts: List[OrderedDict] = [
convert_qconfig_mapping.object_type_qconfigs,
convert_qconfig_mapping.module_name_qconfigs,
convert_qconfig_mapping.module_name_regex_qconfigs,
]
dict_names = [OBJECT_TYPE_DICT_KEY, MODULE_NAME_DICT_KEY, MODULE_NAME_REGEX_DICT_KEY]
for i in range(len(prepare_dicts)):
for name, qconfig in prepare_dicts[i].items():
assert name in convert_dicts[i], "Missing key {} {} in convert QConfigMapping \
when it was present in prepare".format(dict_names[i], name)
assert convert_dicts[i][name] is None \
or qconfig_equals(prepare_dicts[i][name], convert_dicts[i][name]), \
"Expected convert QConfigMapping to have the same qconfig as prepare for key {} {}; \
prepare: {}; convert: {}".format(dict_names[i], name, prepare_dicts[i][name], convert_dicts[i][name])
def is_qconfig_supported_by_dtype_configs(qconfig: QConfig, dtype_configs: List[DTypeConfig]):
for dtype_config in dtype_configs:
is_dynamic = dtype_config.is_dynamic
if is_dynamic is None:
is_dynamic = False
input_dtype = dtype_config.input_dtype or torch.float
weight_dtype = dtype_config.weight_dtype or torch.float
bias_dtype = dtype_config.bias_dtype or torch.float
output_dtype = dtype_config.output_dtype or torch.float
qconfig_activation_dtype, qconfig_weight_dtype, qconfig_compute_dtype = \
get_qconfig_dtypes(qconfig)
qconfig_bias_dtype = torch.float16 \
if qconfig_activation_dtype == torch.float16 and \
qconfig_weight_dtype == torch.float16 \
else torch.float
if is_dynamic:
is_match = input_dtype == qconfig_compute_dtype and \
output_dtype == torch.float and \
weight_dtype == qconfig_weight_dtype
else:
is_match = input_dtype == qconfig_activation_dtype and \
output_dtype == qconfig_activation_dtype and \
weight_dtype == qconfig_weight_dtype and \
bias_dtype == qconfig_bias_dtype
if is_match:
return True
return False
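# Editor-added illustrative sketch (not part of the original module): check a
# default static qconfig against a single static int8 DTypeConfig. The exact
# result depends on the observer dtypes of the chosen qconfig, so this is only
# meant to show how the helper above is called.
def _example_is_qconfig_supported_by_dtype_configs():
    from torch.ao.quantization import get_default_qconfig
    qconfig = get_default_qconfig("fbgemm")
    dtype_configs = [DTypeConfig(
        input_dtype=torch.quint8,
        output_dtype=torch.quint8,
        weight_dtype=torch.qint8,
        bias_dtype=torch.float)]
    return is_qconfig_supported_by_dtype_configs(qconfig, dtype_configs)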
|
pytorch-master
|
torch/ao/quantization/fx/qconfig_utils.py
|
from ._lower_to_native_backend import _lower_to_native_backend
from .graph_module import QuantizedGraphModule
from ..qconfig import QConfigAny
from typing import Dict, Tuple
__all__ = ['lower_to_fbgemm']
def lower_to_fbgemm(
model: QuantizedGraphModule,
qconfig_map: Dict[str, QConfigAny],
node_name_to_scope: Dict[str, Tuple[str, type]]
) -> QuantizedGraphModule:
""" Lower a quantized reference model (with reference quantized operator patterns)
to fbgemm
"""
return _lower_to_native_backend(model, qconfig_map, node_name_to_scope)
|
pytorch-master
|
torch/ao/quantization/fx/lower_to_fbgemm.py
|
import warnings
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.intrinsic as nni
from torch.fx import GraphModule
from torch.fx.graph import Node
from ..observer import _with_args, ObserverBase, PerChannelMinMaxObserver
from ..utils import _parent_name, check_min_max_valid
from .utils import (
get_new_attr_name_with_prefix,
maybe_get_next_module,
WEIGHT_INDEX_DICT,
)
CUSTOM_MODULE_SUPP_LIST: List[Any] = []
def reshape_scale(scale: torch.Tensor, axis: int, input: torch.Tensor) -> torch.Tensor:
"""Reshapes the scale so that we can multiply it to the input by the given axis.
"""
new_shape = [1] * input.ndim
new_shape[axis] = input.size(axis)
return scale.view(new_shape)
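# Editor-added illustrative sketch (not part of the original module): reshaping a
# per-channel scale so that it broadcasts along axis=1 of an (N, C, H, W) input.
def _example_reshape_scale():
    x = torch.randn(2, 3, 4, 4)
    scale = torch.tensor([1.0, 2.0, 3.0])
    # reshape_scale views the scale as (1, 3, 1, 1) so the multiply broadcasts
    # over the channel dimension only.
    scaled = x * reshape_scale(scale, 1, x)
    assert scaled.shape == x.shape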
class _InputEqualizationObserver(nn.Module):
r"""Observer for tracking the running min/max values of input columns, and
computing the quantization parameters for the overall min/max input values.
Args:
dtype: Quantized data type
qscheme: Quantization scheme
quant_min: Minimum quantization value. If unspecified, it will
follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will
follow the 8-bit setup.
The running minimum/maximum :math:`x_\text{min/max}` are computed in the
same way as :class:`~torch.ao.quantization.observer.PerChannelMinMaxObserver`,
with the difference that the running min/max values are stored per column.
This observer is intended to be used along with a WeightEqualizationObserver
to calculate the equalization scale.
"""
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
quant_min=None, quant_max=None, factory_kwargs=None) -> None:
super(_InputEqualizationObserver, self).__init__()
if qscheme not in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
raise TypeError("Input qscheme must be per-tensor")
self.dtype = dtype
self.qscheme = qscheme
self.input_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype,
qscheme=qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs)
self.equalization_scale = torch.tensor(1)
self.equalization_shape: List[int] = []
def forward(self, x_orig):
if not (x_orig.ndim >= 2 and x_orig.ndim <= 5):
raise ValueError("InputEqualizationObserver only supports Linear and Conv layers")
# Calculate the shape needed to reshape the equalization scale later (needed for Conv layers)
self.equalization_shape = [1] * x_orig.ndim
self.equalization_shape[1] = x_orig.size(1)
return self.input_obs(x_orig)
def get_input_minmax(self):
return (self.input_obs.min_val, self.input_obs.max_val)
def set_equalization_scale(self, equalization_scale):
# Reshape the equalization scale along axis=1 so that it can be
# multiplied with the input along axis=1
if equalization_scale.nelement() == 1 and equalization_scale == torch.tensor(1):
return
self.equalization_scale = torch.reshape(equalization_scale, self.equalization_shape)
def calculate_scaled_minmax(self):
r""" Returns the scaled min/max inputs
"""
if self.equalization_scale.nelement() == 1 and self.equalization_scale == torch.tensor(1):
warnings.warn(
"Must call calculate_equalization_scale before calling calculate_scaled_minmax. " +
"Will not scale the next quantization observer."
)
return None, None
# Calculate qparams for the scaled min/max inputs
# Scale the input by the equalization scale located at the same column
# index
(min_inputs, max_inputs) = self.get_input_minmax()
equalization_scale_reshaped = reshape_scale(self.equalization_scale, 0, min_inputs)
min_input_scaled = torch.min(torch.mul(min_inputs, equalization_scale_reshaped))
max_input_scaled = torch.max(torch.mul(max_inputs, equalization_scale_reshaped))
return min_input_scaled, max_input_scaled
with_args = classmethod(_with_args)
class _WeightEqualizationObserver(nn.Module):
r"""Observer for tracking the running min/max values of weight columns and
rows, and computing the quantization parameters for the weight rows.
Args:
dtype: Quantized data type
qscheme: Quantization scheme
quant_min: Minimum quantization value. If unspecified, it will
follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will
follow the 8-bit setup.
This observer is made up of a single PerChannelMinMaxObserver `weight_col_obs` used
to record the running minimum and maximum of the columns of incoming weight
tensors. This observer is intended to be used along with an
InputEqualizationObserver to calculate the equalization scale.
The running minimum/maximum :math:`w_\text{min/max}` are computed in the
same way as :class:`~torch.ao.quantization.observer.PerChannelMinMaxObserver`.
"""
def __init__(self, dtype=torch.qint8, qscheme=torch.per_tensor_affine, quant_min=None,
quant_max=None, factory_kwargs=None) -> None:
super(_WeightEqualizationObserver, self).__init__()
self.dtype = dtype
self.qscheme = qscheme
self.ch_axis = 1
self.weight_col_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype,
qscheme=qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs)
self.equalization_scale = torch.tensor(1)
def forward(self, w_orig):
if not (w_orig.ndim >= 2 and w_orig.ndim <= 5):
raise ValueError("InputEqualizationObserver only supports Linear and Conv layers")
return self.weight_col_obs(w_orig)
def get_weight_col_minmax(self):
return (self.weight_col_obs.min_val, self.weight_col_obs.max_val)
def set_equalization_scale(self, equalization_scale):
self.equalization_scale = equalization_scale
with_args = classmethod(_with_args)
def calculate_equalization_scale(input_obs: _InputEqualizationObserver,
weight_obs: _WeightEqualizationObserver) -> torch.Tensor:
r""" Calculates the equalization scale and sets the equalization_scale value
in the observers.
Args:
input_obs: Observer that tracks the ranges for the input columns
weight_obs: Observer that tracks the ranges for the weight columns
"""
(min_inputs, max_inputs) = input_obs.get_input_minmax()
(min_weights, max_weights) = weight_obs.get_weight_col_minmax()
if not (check_min_max_valid(min_inputs, max_inputs) and check_min_max_valid(min_weights, max_weights)):
warnings.warn(
"Must run observer before calling calculate_equalization_scale. " +
"Returning default equalization scale torch.tensor(1)."
)
return torch.tensor(1)
if not (min_inputs.shape == min_weights.shape):
raise ValueError(
"Input and Weight must have the same column dimension. " +
f"Found {min_inputs.shape} and {min_weights.shape} shapes instead."
)
equalization_scale = torch.sqrt((max_weights - min_weights) / (max_inputs - min_inputs))
# Replace all 'inf', 'nan', 0's with 1s to prevent errors
equalization_scale[equalization_scale == 0.] = 1
equalization_scale = torch.nan_to_num(equalization_scale, nan=1, posinf=1, neginf=1)
return equalization_scale
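# Editor-added illustrative sketch (not part of the original module): for a Linear
# layer, the equalization scale is sqrt(weight column range / input column range),
# computed per input column. The shapes below are made up.
def _example_calculate_equalization_scale():
    inp_obs = _InputEqualizationObserver()
    weight_obs = _WeightEqualizationObserver()
    inp_obs(torch.randn(8, 4))      # activations: (batch, in_features)
    weight_obs(torch.randn(5, 4))   # weight: (out_features, in_features)
    scale = calculate_equalization_scale(inp_obs, weight_obs)
    assert scale.shape == (4,)      # one scale per input column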
class EqualizationQConfig(namedtuple('EqualizationQConfig', ['input_activation', 'weight'])):
"""
Describes how to quantize a layer or a part of the network specifically for
input-weight equalization by providing settings (observer classes) for
inputs, outputs, and weights.
Note that EqualizationQConfig needs to contain observer **classes** (like
MinMaxObserver) or a callable that returns instances on invocation, not the
concrete observer instances themselves.
The quantization function will instantiate the observers multiple times, once for
each of the layers.
Observer classes usually have reasonable default arguments, but they can be
overridden with the `with_args` method (which behaves like functools.partial):
my_qconfig = EqualizationQConfig(input_activation=_InputEqualizationObserver.with_args(dtype=torch.qint8),
weight=_WeightEqualizationObserver.with_args(dtype=torch.qint8))
"""
def __new__(cls, input_activation=torch.nn.Identity, weight=torch.nn.Identity):
if isinstance(input_activation, nn.Module) or isinstance(weight, nn.Module):
raise ValueError("EqualizationQConfig received observer instance, please pass observer class instead. " +
"Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
self = super(EqualizationQConfig, cls).__new__(cls, input_activation, weight)
return self
input_equalization_observer = _InputEqualizationObserver.with_args(
dtype=torch.quint8, qscheme=torch.per_tensor_symmetric)
weight_equalization_observer = _WeightEqualizationObserver.with_args(
dtype=torch.qint8, qscheme=torch.per_channel_symmetric)
default_equalization_qconfig = EqualizationQConfig(input_activation=input_equalization_observer,
weight=weight_equalization_observer)
def fused_module_supports_equalization(module) -> bool:
""" Checks if the fused node supports equalization. """
return type(module) in [nni.LinearReLU, nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d]
def nn_module_supports_equalization(module) -> bool:
""" Checks if the torch.nn node supports equalization. """
return type(module) in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d]
def custom_module_supports_equalization(module) -> bool:
""" Checks if the custom node supports equalization. """
return type(module) in CUSTOM_MODULE_SUPP_LIST
def node_supports_equalization(node: Node, modules) -> bool:
""" Checks if the current node supports equalization
Currently we only support nn.Linear/F.Linear and nn.Conv/F.conv layers
"""
if node.op == 'call_module':
return nn_module_supports_equalization(modules[str(node.target)]) or \
fused_module_supports_equalization(modules[str(node.target)]) or \
custom_module_supports_equalization(modules[str(node.target)])
elif node.op == 'call_function':
return node.target in [F.linear, F.conv1d, F.conv2d, F.conv3d]
return False
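# Editor-added illustrative sketch (not part of the original module): on a traced
# Linear + ReLU model, only the Linear call_module node supports equalization.
def _example_node_supports_equalization():
    from torch.fx import symbolic_trace
    m = symbolic_trace(nn.Sequential(nn.Linear(4, 4), nn.ReLU()))
    modules = dict(m.named_modules())
    # Maps each call_module node name to True (Linear) or False (ReLU).
    return {node.name: node_supports_equalization(node, modules)
            for node in m.graph.nodes if node.op == 'call_module'}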
def is_equalization_observer(observer: nn.Module) -> bool:
return (isinstance(observer, _InputEqualizationObserver) or
isinstance(observer, _WeightEqualizationObserver))
###############################################################################
# Functions for equalization during convert #
###############################################################################
def get_op_node_and_weight_eq_obs(
input_eq_obs_node: Node,
model: GraphModule,
modules: Dict[str, nn.Module]
) -> Tuple[Optional[Node], Optional[_WeightEqualizationObserver]]:
""" Gets the following weight equalization observer. There should always
exist a weight equalization observer after an input equalization observer.
Returns the operation node that follows the input equalization observer node
and the weight equalization observer
"""
# Find the op node that comes directly after the input equalization observer
op_node = None
for user in input_eq_obs_node.users.keys():
if node_supports_equalization(user, modules):
op_node = user
break
assert(op_node is not None)
if op_node.op == 'call_module':
# If the op_node is a nn.Linear layer, then it must have a
# WeightEqualizationObserver configuration
equalization_qconfig_map: Dict[str, Any] = model._equalization_qconfig_map # type: ignore[assignment]
assert(equalization_qconfig_map.get(op_node.name, None) is not None)
weight_eq_obs = equalization_qconfig_map.get(op_node.name, None).weight()
assert(isinstance(weight_eq_obs, _WeightEqualizationObserver))
return op_node, weight_eq_obs
elif op_node.op == 'call_function':
weight_node = maybe_get_weight_eq_obs_node(op_node, modules)
if weight_node is not None:
weight_eq_obs = modules[str(weight_node.target)]
assert(isinstance(weight_eq_obs, _WeightEqualizationObserver))
return op_node, weight_eq_obs
return None, None
def maybe_get_weight_eq_obs_node(op_node: Node, modules: Dict[str, nn.Module]) -> Optional[Node]:
""" Gets the weight equalization observer node if it exists.
"""
assert(op_node.op == 'call_function' and op_node.target in WEIGHT_INDEX_DICT)
for i, node_arg in enumerate(op_node.args):
if i in WEIGHT_INDEX_DICT[op_node.target]: # type: ignore[index]
assert(isinstance(node_arg, Node) and node_arg.op == 'call_module' and
isinstance(modules[str(node_arg.target)], _WeightEqualizationObserver))
return node_arg
return None
def maybe_get_next_input_eq_obs(node: Node, modules: Dict[str, nn.Module]) -> Optional[_InputEqualizationObserver]:
""" Gets the following input equalization observer if it exists.
For example, in the case of connecting linear layers:
x -> inp_obs1 -> eq_obs1 -> linear1 -> out_obs1 -> eq_obs2 -> linear2 -> out_obs2
If the node being passed in is the linear1 node, then we want to return eq_obs2,
the following equalization observer for linear2.
However, if there are no connecting layers:
x -> inp_obs1 -> eq_obs1 -> linear1 -> out_obs1 -> add
Then we want to return None.
In the case of an unfused linear-relu layer with a connecting linear layer:
linear1 -> relu -> out_obs1 -> eq_obs2 -> linear2 -> out_obs2
Since it is unfused, we want to skip over the relu layer and return eq_obs2,
the following equalization observer for linear2.
"""
assert(node_supports_equalization(node, modules))
# Locate the following nn.ReLU or F.relu node if it exists
maybe_relu_node = maybe_get_next_module(node, modules, nn.ReLU)
if maybe_relu_node is None:
maybe_relu_node = maybe_get_next_module(node, modules, target_functional_type=F.relu)
# Locate the following output observer if it exists.
# We will skip the relu node if it exists.
maybe_obs_node = (
maybe_get_next_module(node, modules, ObserverBase)
if maybe_relu_node is None
else maybe_get_next_module(maybe_relu_node, modules, ObserverBase)
)
if maybe_obs_node is None:
return None
maybe_eq_obs_node = maybe_get_next_module(maybe_obs_node, modules, _InputEqualizationObserver)
if maybe_eq_obs_node is None:
return None
maybe_eq_obs = modules[str(maybe_eq_obs_node)]
assert(isinstance(maybe_eq_obs, _InputEqualizationObserver))
return maybe_eq_obs
def maybe_get_next_equalization_scale(node: Node, modules: Dict[str, nn.Module]) -> Optional[torch.Tensor]:
""" If the next next node is an InputEqualizationObserver then we want to
return its equalization scale, else we return 1
This is used in the case where there are two connecting linear layers:
linear1 -> LinearOutObs -> InputEqObs -> linear2
In this case, the node given is linear1 and we want to locate the InputEqObs.
"""
next_inp_eq_obs = maybe_get_next_input_eq_obs(node, modules)
if next_inp_eq_obs:
if next_inp_eq_obs.equalization_scale.nelement() == 1 and \
next_inp_eq_obs.equalization_scale == torch.tensor(1):
return None
return next_inp_eq_obs.equalization_scale
return None
def scale_input_observer(node: Node, modules: Dict[str, nn.Module]) -> None:
""" Scales the following input quantization observer's min/max values by
updating the values with the scaled min/max values calculated by the input
equalization observer
"""
input_eq_obs = modules[str(node.target)]
assert(isinstance(input_eq_obs, _InputEqualizationObserver))
input_quant_obs_node = node.args[0]
assert(isinstance(input_quant_obs_node, Node))
input_quant_obs = modules[str(input_quant_obs_node.target)]
if not isinstance(input_quant_obs, ObserverBase):
return
min_input_scaled, max_input_scaled = input_eq_obs.calculate_scaled_minmax()
if min_input_scaled is None and max_input_scaled is None:
return
input_quant_obs.min_val = min_input_scaled
input_quant_obs.max_val = max_input_scaled
def scale_weight_node(
node: Node,
modules: Dict[str, nn.Module],
equalization_scale: torch.Tensor,
next_equalization_scale: Optional[torch.Tensor],
) -> None:
""" Scale the weights for input-weight equalization by multiplying the
weight by 1/equalization_scale and next_equalization_scale
Args:
node: Current node whose weights we want to scale
equalization_scale: Current node's calculated equalization scale
next_equalization_scale: Next node's calculated equalization scale if
the following node needs to be equalized, None otherwise
"""
if equalization_scale is None:
return
if fused_module_supports_equalization(modules[str(node.target)]):
op_module = modules[str(node.target)][0] # type: ignore[index]
else:
op_module = modules[str(node.target)]
assert(nn_module_supports_equalization(op_module) or custom_module_supports_equalization(op_module))
# Scale the weights for input-weight equalization
# If the following layer needs to be equalized then we will multiply its scale
weight = op_module.weight
assert(isinstance(weight, torch.Tensor))
# Scale the weights by the reciprocal of the equalization scale
# Reshape the equalization scale so that we can multiply it to the weight along axis=1
equalization_scale_reshaped = reshape_scale(equalization_scale, 1, weight)
scaled_weight = torch.mul(weight, torch.reciprocal(equalization_scale_reshaped))
if next_equalization_scale is None:
op_module.weight = nn.Parameter(scaled_weight)
return
# Multiply the weights row wise by the next equalization scale
# Reshape the equalization scale so that we can multiply it to the weight along axis=0
next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, weight)
scaled_weight = torch.mul(scaled_weight, next_equalization_scale_reshaped)
op_module.weight = nn.Parameter(scaled_weight)
# Multiply the bias element wise by the next equalization scale
bias = op_module.bias
if bias is None:
return
assert(isinstance(bias, torch.Tensor))
# Reshape the equalization scale so that we can multiply it element-wise to the bias
next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, bias)
scaled_bias = torch.mul(bias, next_equalization_scale_reshaped)
op_module.bias = nn.Parameter(scaled_bias)
def scale_weight_functional(
op_node: Node,
model: GraphModule,
modules: Dict[str, nn.Module],
equalization_scale: torch.Tensor,
next_equalization_scale: Optional[torch.Tensor],
) -> None:
""" Scales the weight value for functional layers
"""
if equalization_scale is None:
return
# From the given op_node, the path looks like:
# get_attr(weight) -> weight_quant_obs -> weight_eq_obs -> op_node
# So we want to trace back from the op_node to get the equalization observer
# node, then the quantization observer node, and then finally the weight
# node which contains the weight values.
# Get the equalization observer node
weight_eq_obs_node = maybe_get_weight_eq_obs_node(op_node, modules)
if weight_eq_obs_node is None:
return
# Get the quantization observer node
weight_quant_obs_node = weight_eq_obs_node.args[0]
if weight_quant_obs_node is None:
return
assert(isinstance(weight_quant_obs_node, Node) and
isinstance(modules[str(weight_quant_obs_node.target)], ObserverBase))
# Get the get_attr(weight) node
weight_node = weight_quant_obs_node.args[0]
if weight_node is None:
return
assert(isinstance(weight_node, Node) and weight_node.op == 'get_attr')
weight_parent_name, weight_name = _parent_name(weight_node.target)
weight = getattr(modules[weight_parent_name], weight_name)
# Scale the weights for input-weight equalization
# If the following layer needs to be equalized then we will multiply its scale
# Reshape the equalization scale so that we can multiply it to the weight along axis=1
equalization_scale_reshaped = reshape_scale(equalization_scale, 1, weight)
scaled_weight = torch.mul(weight, torch.reciprocal(equalization_scale_reshaped))
if next_equalization_scale is None:
setattr(modules[weight_parent_name], weight_name, scaled_weight)
return
# Multiply the weights row wise by the next equalization scale
# Reshape the equalization scale so that we can multiply it to the weight along axis=0
next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, scaled_weight)
scaled_weight = torch.mul(scaled_weight, next_equalization_scale_reshaped)
setattr(modules[weight_parent_name], weight_name, scaled_weight)
assert(torch.allclose(model.get_buffer(str(weight_node.target)), scaled_weight))
# Multiply the bias element wise by the next equalization scale
bias_node = None
for node in op_node.args:
# Find the node containing the weight values
if isinstance(node, Node) and node.op == 'get_attr' and 'bias' in node.name:
bias_node = node
break
if bias_node is None:
return
bias_parent_name, bias_name = _parent_name(bias_node.target)
bias = getattr(modules[bias_parent_name], bias_name)
# Reshape the equalization scale so that we can multiply it element-wise to the bias
next_equalization_scale_reshaped = reshape_scale(next_equalization_scale, 0, bias)
scaled_bias = torch.mul(bias, next_equalization_scale_reshaped)
setattr(modules[bias_parent_name], bias_name, scaled_bias)
def clear_weight_quant_obs_node(op_node: Node, modules: Dict[str, nn.Module]) -> None:
""" Given the operation node, we want find the corresponding quantization
observer and reset its min/max values
"""
weight_eq_obs_node = maybe_get_weight_eq_obs_node(op_node, modules)
if weight_eq_obs_node is None:
return
weight_quant_obs_node = weight_eq_obs_node.args[0]
if weight_quant_obs_node is None:
return
assert(isinstance(weight_quant_obs_node, Node))
weight_quant_obs = modules[str(weight_quant_obs_node.target)]
assert(isinstance(modules[str(weight_quant_obs_node.target)], ObserverBase))
weight_quant_obs.reset_min_max_vals() # type: ignore[operator]
def remove_node(model: GraphModule, node: Node, prev_node: Node):
""" Removes the given node from the model by replacing all of its users with
the given previous node
"""
# For all of the current node's users, replace the current node with
# the input quantization observer node
orig_users = list(node.users.keys())
for user_node in orig_users:
user_node.replace_input_with(node, prev_node)
# Erase the InputEqualizationObserver node
model.graph.erase_node(node)
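# Editor-added illustrative sketch (not part of the original module): on a toy
# FX-traced model, drop the ReLU node by rewiring its users to the node's input.
def _example_remove_node():
    from torch.fx import symbolic_trace
    m = symbolic_trace(nn.Sequential(nn.Linear(4, 4), nn.ReLU()))
    modules = dict(m.named_modules())
    relu_node = next(
        n for n in m.graph.nodes
        if n.op == 'call_module' and isinstance(modules[str(n.target)], nn.ReLU))
    remove_node(m, relu_node, relu_node.args[0])
    m.recompile()   # the graph now goes linear -> output, skipping the relu
    return m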
def update_obs_for_equalization(model: GraphModule, modules: Dict[str, nn.Module]) -> Dict[str, _WeightEqualizationObserver]:
""" Update all of the observer's equalization scale. For each
InputEqualizationObserver, we will find the location of the next
WeightEqualizationObserver, create it, and calculate the equalization scale
based on the two observers.
We will then return a dictionary mapping operation node names to
the corresponding WeightEqualizationObservers for that operation.
"""
weight_eq_obs_dict = {}
for node in model.graph.nodes:
if node.op == 'call_module' and isinstance(modules[node.target], _InputEqualizationObserver):
input_eq_obs = modules[node.target]
assert(isinstance(input_eq_obs, _InputEqualizationObserver))
op_node, weight_eq_obs = get_op_node_and_weight_eq_obs(node, model, modules)
if op_node is None or weight_eq_obs is None:
continue
if op_node.op == 'call_module':
# Calibrate the weight equalization observer since it has just
# been created
if fused_module_supports_equalization(modules[str(op_node.target)]):
module = modules[str(op_node.target)][0] # type: ignore[index]
assert(nn_module_supports_equalization(module))
weight_eq_obs(module.weight)
else:
weight_eq_obs(modules[str(op_node.target)].weight)
# Calculate and set the equalization scale values
equalization_scale = calculate_equalization_scale(input_eq_obs, weight_eq_obs)
input_eq_obs.set_equalization_scale(equalization_scale)
weight_eq_obs.set_equalization_scale(equalization_scale)
weight_eq_obs_dict[op_node.name] = weight_eq_obs
return weight_eq_obs_dict
def convert_eq_obs(
model: GraphModule,
modules: Dict[str, nn.Module],
weight_eq_obs_dict: Dict[str, _WeightEqualizationObserver],
) -> None:
""" Converts the equalization operations and updates the other nodes in the
following way:
- Removes the input equalization observers and inserts a mul operator
along with an equalization scale node wherever applicable (we do not
want to insert a mul operator between connecting linear layers).
- Updates the input quantization observers with the scaled input min/max
values.
- Scales the weights by the current and next equalization scales.
- Removes the weight equalization observer node if it exists.
Before (after prepare):
weight values
|
WeightQuantObs
|
WeightEqObs
|
x -> InpQuantObs -> InpEqObs -> linear -> OutQuantObs
After this function:
scaled weight values
|
equalization scale WeightQuantObs
| |
x -> mul -> InpQuantObs (scaled min/max) -> linear -> OutQuantObs
After convert:
equalization scale scaled weight values
| |
x -> mul -> quantize_per_tensor -> quantized::linear
Note that although the equalization observer appeared after the quantization
observer after prepare_fx, the mul node appears before the quantization node
after convert_fx. This is because placing the equalization observer after
the quantization observer in prepare_fx lets us keep the invariant that
inserting observers for the current node does not modify the part of the
graph that comes before it.
Having the equalization observer before the quantization observer would also
cause some inconsistencies between the ordering of the quantization and
equalization observers.
For example, a single linear layer would look like:
x -> InpEqObs1 -> InpQuantObs1 -> linear1 -> OutQuantObs1
But between two connected linear layers, it would look like:
linear1 -> OutQuantObs1 -> InpEqObs2 -> linear2 -> OutQuantObs2
"""
for node in model.graph.nodes:
if node.op == 'call_module' and isinstance(modules[node.target], _InputEqualizationObserver):
inp_quant_obs_node = node.args[0]
prev_node = inp_quant_obs_node.args[0]
# If the previous node is a layer that needs to be equalized, then
# we will remove the current node because we do not need to add any
# equalization nodes between two layers that need to be equalized
# Before: linear1/relu (prev_node) -> output_quant_obs1 (inp_quant_obs_node) -> input_eq_obs2 (node) -> linear2
# After: linear1/relu (prev_node) -> output_quant_obs1 (inp_quant_obs_node) -> linear2
if node_supports_equalization(prev_node, modules) or "relu" in prev_node.name:
remove_node(model, node, inp_quant_obs_node)
continue
# Update the following input quantization observer's min/max values
scale_input_observer(node, modules)
# Remove the InputEqualization node and add a mul operator before
# the quantization observer node that appears before the equalization node
# Before: x -> input_quant_obs -> input_eq_obs -> linear
# After: x -> mul -> input_quant_obs -> linear
# Create a node containing the equalization scale
with model.graph.inserting_before(inp_quant_obs_node):
get_new_eq_scale_name = get_new_attr_name_with_prefix(prev_node.name + '_equalization_scale')
name = get_new_eq_scale_name(modules)
setattr(model, name, modules[node.target].equalization_scale)
eq_scale_node = model.graph.create_node('get_attr', name)
# Create a node multiplying the input with the equalization scale
with model.graph.inserting_after(eq_scale_node):
inputs = (prev_node, eq_scale_node)
mul_node = model.graph.create_node("call_function", torch.mul, inputs)
# Set the mul node to be the input_quant_obs_node's input instead of
# the previous node
inp_quant_obs_node.replace_input_with(prev_node, mul_node)
remove_node(model, node, inp_quant_obs_node)
elif weight_eq_obs_dict.get(node.name, None) is not None:
weight_eq_obs = weight_eq_obs_dict.get(node.name)
assert(isinstance(weight_eq_obs, _WeightEqualizationObserver))
equalization_scale = weight_eq_obs.equalization_scale
if equalization_scale.nelement() == 1 and equalization_scale == torch.tensor(1):
equalization_scale = None # type: ignore[assignment]
maybe_next_equalization_scale = maybe_get_next_equalization_scale(node, modules)
# Scale the weight nodes
if node.op == 'call_module':
scale_weight_node(node, modules, equalization_scale, maybe_next_equalization_scale)
elif node.op == 'call_function':
scale_weight_functional(node, model, modules, equalization_scale, maybe_next_equalization_scale)
weight_eq_obs_node = maybe_get_weight_eq_obs_node(node, modules)
if weight_eq_obs_node is None:
return
assert(isinstance(modules[str(weight_eq_obs_node.target)], _WeightEqualizationObserver))
# Clear the quantization observer's min/max values so that they
# can get updated later based on the new scale values
clear_weight_quant_obs_node(node, modules)
# Erase the weight equalization observer node
prev_node = weight_eq_obs_node.args[0]
remove_node(model, weight_eq_obs_node, prev_node)
else:
raise ValueError("Expected operation node to be 'call_module' or 'call_function" +
f"Instead got node {node.name} as '{node.op}'.")
def _convert_equalization_ref(model: GraphModule):
""" Reference function which applies changes needed for equalization, but
does not quantize the nodes
"""
modules = dict(model.named_modules(remove_duplicate=False))
# Calculate the equalization scale, update the observers with the scaled
# inputs, and scale the weight
weight_eq_obs_dict = update_obs_for_equalization(model, modules)
convert_eq_obs(model, modules, weight_eq_obs_dict)
return GraphModule(model, model.graph)
###############################################################################
# Functions for running the equalized model on the Numeric Suite #
###############################################################################
def get_layer_sqnr_dict(model_a: nn.Module, model_b: nn.Module, x: torch.Tensor) -> Dict[str, float]:
""" Runs the Numeric Suite on model_a and model_b and returns a dictionary
containing the SQNR between layers in model_a and model_b.
Note: In order to support equalized models, this function has a hacky fix in
which we do not match any torch.mul operators. This is because equalized
models contain extra mul operators to scale the input by the equalization
scale, but this edge case has not been resolved yet within the numeric suite code.
Args:
model_a: A float model
model_b: A quantized model
x: Inputs to use during calibration
"""
import torch.ao.ns._numeric_suite_fx as ns
from torch.ao.ns.fx.mappings import get_unmatchable_types_map
unmatchable_types_map = get_unmatchable_types_map()
unmatchable_types_map["funs_unmatchable"].add(torch.mul)
model_a_ns, model_b_ns = ns.add_loggers(
'fp32', model_a,
'int8', model_b,
ns.OutputLogger,
unmatchable_types_map=unmatchable_types_map
)
model_a_ns(x)
model_b_ns(x)
activation_comparison_dict = ns.extract_logger_info(
model_a_ns,
model_b_ns,
ns.OutputLogger,
'int8')
ns.extend_logger_results_with_comparison(
activation_comparison_dict,
'fp32', 'int8',
torch.ao.ns.fx.utils.compute_sqnr, 'sqnr'
)
# Construct a dictionary mapping layer names to the SQNR values
layer_sqnr_dict = {}
for key in activation_comparison_dict:
layer = activation_comparison_dict[key]['node_output']['int8'][0]['fqn']
sqnr = activation_comparison_dict[key]['node_output']['int8'][0]['sqnr'][0]
layer_sqnr_dict[layer] = sqnr
return layer_sqnr_dict
def get_equalization_qconfig_dict(
layer_sqnr_dict: Dict[str, float],
num_layers_to_equalize: int
) -> Any:
""" Given the layer to SQNR dictionary, find the layers with the highest
quantization errors, and return an equalization_qconfig_dict
specifying to only equalize those top layers.
Args:
layer_sqnr_dict: Dictionary mapping layer names to SQNR values (found
when comparing an equalized model against a float model)
num_layers_to_equalize: Number of layers with the highest quantization
errors to equalize
"""
# Sort the layer_sqnr_dictionary values and get the layers with the lowest
# SQNR values (aka highest quantization errors)
layer_sqnr_sorted = sorted(layer_sqnr_dict.items(), key=lambda item: item[1])
layers_to_equalize = layer_sqnr_sorted[:num_layers_to_equalize]
# Constructs an equalization_qconfig_dict that specifies to only equalize
# the layers with the highest quantization errors
module_to_qconfig_list = list(
map(lambda item: (item[0], default_equalization_qconfig), layers_to_equalize)
)
equalization_qconfig_dict = {"module_name": module_to_qconfig_list}
return equalization_qconfig_dict
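# Editor-added illustrative sketch (not part of the original module): the layer
# names and SQNR values below are made up. The two layers with the lowest SQNR
# (i.e. the highest quantization error) are selected for equalization.
def _example_get_equalization_qconfig_dict():
    layer_sqnr_dict = {"fc1": 35.0, "fc2": 12.0, "fc3": 18.5}
    eq_qconfig_dict = get_equalization_qconfig_dict(layer_sqnr_dict, num_layers_to_equalize=2)
    # eq_qconfig_dict == {"module_name": [("fc2", default_equalization_qconfig),
    #                                     ("fc3", default_equalization_qconfig)]}
    return eq_qconfig_dict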
|
pytorch-master
|
torch/ao/quantization/fx/_equalize.py
|
from ._lower_to_native_backend import _lower_to_native_backend
from .graph_module import QuantizedGraphModule
from ..qconfig import QConfigAny
from typing import Dict, Tuple
def lower_to_qnnpack(
model: QuantizedGraphModule,
qconfig_map: Dict[str, QConfigAny],
node_name_to_scope: Dict[str, Tuple[str, type]]
) -> QuantizedGraphModule:
""" Lower a quantized reference model (with reference quantized operator patterns)
to qnnpack
"""
return _lower_to_native_backend(model, qconfig_map, node_name_to_scope)
|
pytorch-master
|
torch/ao/quantization/fx/lower_to_qnnpack.py
|
from typing import Any, Dict, List, Optional, Set, Tuple, Union, Type
from torch.ao.quantization.quant_type import QuantType
import torch
import copy
import warnings
from torch.fx import (
GraphModule,
)
from torch.fx.graph import (
Graph,
Node,
Argument,
)
from ..utils import (
activation_is_statically_quantized,
weight_is_quantized,
get_qparam_dict,
_parent_name,
get_swapped_custom_module_class,
)
from ..qconfig import (
QConfigAny,
qconfig_equals
)
from ..qconfig_mapping import QConfigMapping
from ..qconfig_mapping_utils import (
update_qconfig_for_qat,
)
from .qconfig_utils import (
generate_qconfig_map,
compare_prepare_convert_qconfig_mappings,
update_qconfig_for_fusion,
is_qconfig_supported_by_dtype_configs,
)
from torch.ao.quantization.backend_config.utils import (
get_root_module_to_quantized_reference_module,
get_pattern_to_dtype_configs,
get_fused_module_classes,
get_qat_module_classes,
)
from torch.ao.quantization.backend_config import (
BackendConfig,
get_native_backend_config,
)
from .graph_module import (
QuantizedGraphModule,
is_observed_module,
is_observed_standalone_module,
)
from ._equalize import update_obs_for_equalization, convert_eq_obs
from torch.nn.utils.parametrize import type_before_parametrizations
from .utils import (
get_custom_module_class_keys,
get_quantize_node_info,
create_getattr_from_value,
collect_producer_nodes,
graph_module_from_producer_nodes,
WEIGHT_INDEX_DICT,
)
from torch.ao.quantization.quantize import (
_remove_qconfig,
is_activation_post_process,
)
from .custom_config import (
ConvertCustomConfig,
PrepareCustomConfig,
)
from .lower_to_fbgemm import lower_to_fbgemm
# TODO: revisit this list. Many helper methods shouldn't be public
__all__ = [
"convert",
"convert_custom_module",
"convert_standalone_module",
"convert_weighted_module",
"duplicate_dequantize_node",
"duplicate_quantize_dynamic_node",
"get_module_path_and_prefix",
"has_none_qconfig",
"insert_dequantize_node",
"maybe_get_observer_for_node",
"maybe_recursive_remove_dequantize",
"remove_extra_dequantize",
"remove_quant_dequant_pairs",
"restore_state",
"run_weight_observers",
]
def restore_state(
observed: torch.nn.Module
) -> Tuple[Dict[str, Tuple[str, type]],
PrepareCustomConfig,
Set[str]]:
assert is_observed_module(observed), \
'incoming model must be produced by prepare_fx'
prepare_custom_config: PrepareCustomConfig = observed._prepare_custom_config # type: ignore[assignment]
node_name_to_scope: Dict[str, Tuple[str, type]] = observed._node_name_to_scope # type: ignore[assignment]
observed_node_names: Set[str] = observed._observed_node_names # type: ignore[assignment]
return node_name_to_scope, prepare_custom_config, observed_node_names
def has_none_qconfig(node: Argument, qconfig_map: Dict[str, QConfigAny]) -> bool:
""" Check if a node has a qconfig of None, i.e. user requested to not quantize
the node
"""
return isinstance(node, Node) and node.name in qconfig_map and qconfig_map[node.name] is None
def run_weight_observers(observed: GraphModule) -> None:
""" Extract the subgraph that produces the weight for dynamic quant
or weight only quant node and run the subgraph to observe the weight.
Note that the observers of dynamic quant or weight only quant ops are
run during the convert step.
"""
for node in observed.graph.nodes:
if node.op != 'call_function' or node.target not in WEIGHT_INDEX_DICT:
continue
for i, node_arg in enumerate(node.args):
if i not in WEIGHT_INDEX_DICT[node.target]:
continue
# node_arg is weight
weight_observer_nodes = collect_producer_nodes(node_arg)
if weight_observer_nodes is None:
continue
weight_observer_module = \
graph_module_from_producer_nodes(
observed, weight_observer_nodes)
# run the weight observer
weight_observer_module()
# this method is temporary will be removed soon
def duplicate_quantize_dynamic_node(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
quantized_root = quantized
for node in quantized.graph.nodes:
if (node.op == "call_function" and node.target == torch.quantize_per_tensor_dynamic):
users = list(node.users)
if len(users) > 1:
for user in users:
with quantized.graph.inserting_before(node):
new_node = quantized.graph.create_node(
"call_function",
torch.quantize_per_tensor_dynamic,
node.args,
node.kwargs)
user.replace_input_with(node, new_node)
quantized.graph.erase_node(node)
quantized = QuantizedGraphModule(quantized_root, quantized.graph, quantized_root.preserved_attr_names)
return quantized
def duplicate_dequantize_node(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
"""
If a dequantize node has multiple uses, duplicate it and create one dequantize node for each use.
This is to enable the pattern matching to map from individual quant - dequant - ref_module to
final quantized module.
"""
quantized_root = quantized
for node in quantized.graph.nodes:
if (node.op == "call_method" and node.target == "dequantize" or
(node.op == "call_function" and node.target == torch.dequantize)):
users = list(node.users)
if len(users) > 1:
for user in users:
with quantized.graph.inserting_before(node):
new_node = quantized.graph.create_node("call_method", "dequantize", node.args, {})
user.replace_input_with(node, new_node)
quantized.graph.erase_node(node)
quantized = QuantizedGraphModule(quantized_root, quantized.graph, quantized_root.preserved_attr_names)
return quantized
def remove_extra_dequantize(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
"""
Removes duplicate dequant nodes in the graph: for an operator that has multiple dequant nodes as users,
replace them with a single dequant node that can be shared across all the uses.
"""
quantized_root = quantized
for node in quantized.graph.nodes:
users = list(node.users)
dequant_users = [user for user in node.users if user.op == "call_method" and user.target == "dequantize" or
(user.op == "call_function" and user.target == torch.dequantize)]
if len(dequant_users) > 1:
with quantized.graph.inserting_after(node):
unique_dq = quantized.graph.create_node("call_method", "dequantize", users[0].args, {})
for dequant in dequant_users:
dequant.replace_all_uses_with(unique_dq)
quantized.graph.erase_node(dequant)
quantized = QuantizedGraphModule(quantized_root, quantized.graph, quantized_root.preserved_attr_names)
return quantized
def remove_quant_dequant_pairs(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
quantized_root = quantized
for node in quantized.graph.nodes:
if node.op == "call_function" and node.target in [torch.quantize_per_tensor, torch.quantize_per_channel]:
users = list(node.users)
user = users[0] if users else None
if len(users) == 1 and user.op == "call_method" and user.target == "dequantize":
user.replace_all_uses_with(node.args[0])
quantized.graph.erase_node(user)
orig_args = list(node.args)
quantized.graph.erase_node(node)
for arg in orig_args:
if isinstance(arg, Node) and len(list(arg.users)) == 0:
quantized.graph.erase_node(arg)
quantized = QuantizedGraphModule(quantized_root, quantized.graph, quantized_root.preserved_attr_names)
return quantized
def maybe_recursive_remove_dequantize(arg: Any, node: Node, graph: Graph):
""" If the arg is a dequantize Node, or a list/tuple/dict of dequantize Node,
we'll recursively remove the dequantize Node
"""
if isinstance(arg, Node) and \
arg.op == "call_method" and \
arg.target == "dequantize":
quantize_node = arg.args[0]
# we only replace the specific use since dequantize could be used by other nodes
# as well
node.replace_input_with(arg, quantize_node)
elif isinstance(arg, (list, tuple)):
for arg_element in arg:
maybe_recursive_remove_dequantize(arg_element, node, graph)
elif isinstance(arg, dict):
for arg_element in arg.values():
maybe_recursive_remove_dequantize(arg_element, node, graph)
else:
warnings.warn(f"Unsupported node type in recursive remove dequantize: {type(arg)}")
def get_module_path_and_prefix(
obs_node: Node,
node_name_to_scope: Dict[str, Tuple[str, type]],
qconfig_map: Dict[str, QConfigAny]):
""" Given and observer node, get the `Scope` or the fully qualified name for
the submodule containing the observed node, also return a prefix of "_input"
when the observed node is an input of a F.linear op, and not the output of another
quantized op.
TODO: this logic is hacky, we should think about how to remove it or make it more
general
"""
observed_node = obs_node.args[0]
# an observer can be inserted for both input of the next operator or output of the previous
# operator (they can be the same)
# this flag identifies if the observer is inserted only because the observed node is
# the input of the next operator
assert isinstance(observed_node, Node), \
f"Expecting observed node to be a Node, but got {observed_node}"
is_input_observer_only = qconfig_map[observed_node.name] is None if observed_node.name in qconfig_map else None
if is_input_observer_only:
# if the quantize function is at the input of op, then we find the first user of the observer_node
# to get the path. If a linear call_function is in the user list, we return the first instance
# of linear node to get the FQN.
users = list(obs_node.users)
first_linear_use_or_first_use = users[0] if users else None
linear_node = None
for n in users:
if n.op == "call_function" and n.target == torch.nn.functional.linear:
linear_node = n
break
if linear_node:
first_linear_use_or_first_use = linear_node
prefix = "_input"
else:
# if the quantize function is at the output of the op, we use the observer input node to get the path
first_linear_use_or_first_use = observed_node
prefix = ""
if first_linear_use_or_first_use and first_linear_use_or_first_use.name in node_name_to_scope:
module_path, _ = node_name_to_scope[first_linear_use_or_first_use.name]
else:
# TODO: it's not used, so actually we can skip quantization
# but this requires changing return type of quantize_node
# we can fix it later if needed
module_path = ""
return module_path, prefix
def insert_dequantize_node(
node: Node,
graph: Graph):
""" Inserts dequantize node for `node` in `graph`
"""
with graph.inserting_after(node):
dequantize_node = graph.call_method("dequantize", (node,))
for user_node in dict(node.users):
if user_node is not dequantize_node:
user_node.replace_input_with(node, dequantize_node)
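# Editor-added illustrative sketch (not part of the original module): on a toy FX
# graph, inserting a dequantize after the placeholder rewires every downstream
# user to consume the dequantized value.
def _example_insert_dequantize_node():
    graph = Graph()
    x = graph.placeholder("x")
    relu = graph.call_function(torch.relu, (x,))
    graph.output(relu)
    insert_dequantize_node(x, graph)
    # The graph now reads: x -> dequantize -> relu -> output
    return graph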
def maybe_get_observer_for_node(
node: Node,
modules: Dict[str, torch.nn.Module]
) -> Optional[torch.nn.Module]:
"""
If the node is observed, return the observer
instance. Otherwise, return None.
"""
for maybe_obs_node, _ in node.users.items():
if maybe_obs_node.op == 'call_module':
maybe_obs = modules[str(maybe_obs_node.target)]
if is_activation_post_process(maybe_obs):
return maybe_obs
return None
def convert_standalone_module(
node: Node,
modules: Dict[str, torch.nn.Module],
model: torch.fx.GraphModule,
is_reference: bool,
backend_config: Optional[BackendConfig]):
""" Converts a observed standalone module to a quantized standalone module by calling
the fx convert api, currently using the same `is_reference` flag as parent, but we may
changing this behavior in the future (e.g. separating quantization and lowering for
standalone module as well)
Args:
- node: The call_module node of the observed standalone module
- modules: named_module of original model
- model: original model
- is_reference: a flag from parent provided by user to decide if we want to
produce a reference model or a fbgemm/qnnpack model
- backend_config: backend configuration of the target backend of quantization
"""
# TODO: remove is_reference flag
if is_reference:
convert_fn = torch.ao.quantization.quantize_fx.convert_to_reference_fx
else:
convert_fn = torch.ao.quantization.quantize_fx.convert_fx # type: ignore[attr-defined]
# We know that observed standalone module is a GraphModule since
# it's produced by us
observed_standalone_module : GraphModule = modules[str(node.target)] # type: ignore[assignment]
sm_input_quantized_idxs = \
observed_standalone_module \
._standalone_module_input_quantized_idxs\
.tolist() # type: ignore[operator]
# remove the dequantize nodes for inputs
args = list(node.args)
for idx in range(len(args)):
if idx in sm_input_quantized_idxs:
arg = args[idx]
if arg.op == "call_method" and arg.target == "dequantize": # type: ignore[union-attr]
quantize_node = arg.args[0] # type: ignore[union-attr]
node.replace_input_with(arg, quantize_node)
if len(arg.users) == 0: # type: ignore[union-attr]
model.graph.erase_node(arg)
# add dequantize node for output
sm_output_quantized_idxs = \
observed_standalone_module \
._standalone_module_output_quantized_idxs \
.tolist() # type: ignore[operator]
if len(sm_output_quantized_idxs) > 0:
assert sm_output_quantized_idxs[0] == 0, "Currently only quantized output idxs = [0] is supported"
# if it's non-empty, then it means the output is kept in quantized form
# we'll just add a dequantize node after this node
insert_dequantize_node(node, model.graph)
# TODO: allow convert_custom_config to override backend_config
# for standalone module
quantized_standalone_module = convert_fn(
observed_standalone_module,
backend_config=backend_config)
parent_name, name = _parent_name(node.target)
# update the modules dict
setattr(modules[parent_name], name, quantized_standalone_module)
modules[str(node.target)] = quantized_standalone_module
def convert_weighted_module(
node: Node,
modules: Dict[str, torch.nn.Module],
observed_node_names: Set[str],
qconfig_map: Dict[str, QConfigAny],
backend_config: BackendConfig):
""" Convert a weighted module to reference quantized module in the model
If the QConfig of a QAT module is not set, the module will still be converted to
a float module.
Args:
- node: The call_module node of the observed standalone module
- modules: named_module of original model
- observed_node_names: names for the set of observed fx node, we can skip
this conversion if the node is not observed
"""
original_module = modules[str(node.target)]
qconfig: QConfigAny = original_module.qconfig # type: ignore[assignment]
weight_post_process = None
qat_module_classes = get_qat_module_classes(backend_config)
if isinstance(
original_module,
qat_module_classes):
# Converting qat module to a float module, we need to attach
# weight fake_quant to the module, weight fake_quant is assumed to be run during
# QAT so we don't need to run it again here
weight_post_process = original_module.weight_fake_quant
original_module = original_module.to_float() # type: ignore[operator]
# change qat module to float module
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, original_module)
is_observed = node.name in observed_node_names
# If a qconfig is not defined for this node, then skip converting to a reference module
if qconfig is None or has_none_qconfig(node, qconfig_map) or not is_observed:
return
# skip converting to reference quantized module if the qconfig is not supported
pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config)
dtype_configs = pattern_to_dtype_configs.get(type(original_module), [])
if not is_qconfig_supported_by_dtype_configs(qconfig, dtype_configs):
return
# TODO: rename weight_is_statically_quantized to weight_is_int8_quantized
is_weight_quantized = weight_is_quantized(qconfig)
# the condition for swapping the module to reference quantized module is:
# weights need to be quantized
if not is_weight_quantized:
return
fused_module = None
float_module = original_module
# extract the individual float_module and fused module
if isinstance(original_module, torch.nn.intrinsic._FusedModule):
fused_module = float_module
float_module = fused_module[0] # type: ignore[index]
# TODO: move this to the reference quantized module
# weight_qparams or weight_qparams dict
wq_or_wq_dict = {}
if isinstance(float_module, torch.nn.RNNCellBase):
weight_post_process_ih = qconfig.weight() # type: ignore[union-attr, operator]
weight_post_process_hh = qconfig.weight() # type: ignore[union-attr, operator]
weight_post_process_ih(float_module.weight_ih)
weight_post_process_hh(float_module.weight_hh)
weight_qparams_ih = get_qparam_dict(weight_post_process_ih)
weight_qparams_hh = get_qparam_dict(weight_post_process_hh)
wq_or_wq_dict = {
"weight_ih": weight_qparams_ih,
"weight_hh": weight_qparams_hh,
}
elif isinstance(float_module, torch.nn.LSTM):
# format for wq_or_wq_dict (flattened attributes):
# {"weight_ih_l0_scale": ..., "weight_ih_l0_qscheme": ..., ...}
for wn in float_module._flat_weights_names:
if hasattr(float_module, wn) and wn.startswith("weight"):
weight = getattr(float_module, wn)
weight_post_process = qconfig.weight() # type: ignore[union-attr, operator]
if weight_post_process.dtype == torch.qint8: # type: ignore[union-attr]
weight_post_process(weight) # type: ignore[operator, misc]
wq_or_wq_dict[wn] = get_qparam_dict(weight_post_process)
else:
        # weight_post_process being None means the original module is not a QAT module,
        # so we need to get weight_post_process from the qconfig in this case
if weight_post_process is None:
weight_post_process = qconfig.weight() # type: ignore[union-attr, operator]
# run weight observer
# TODO: This is currently a hack for QAT to get the right shapes for scale and zero point.
# In the future, we should require the user to calibrate the model after calling prepare
# Issue: https://github.com/pytorch/pytorch/issues/73941
weight_post_process(float_module.weight) # type: ignore[operator]
wq_or_wq_dict = get_qparam_dict(weight_post_process)
# We use the same reference module for all modes of quantization: static, dynamic, weight_only
# root_module_to_quantized_reference_module: module mapping from root (floating point) module class
# to quantized reference module class, e.g. nn.Conv2d to nn.quantized._reference.Conv2d
root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config)
ref_qmodule_cls = root_module_to_quantized_reference_module.get(type_before_parametrizations(float_module), None)
assert (
ref_qmodule_cls is not None
), f"No reference quantized module class configured for {type_before_parametrizations(float_module)}"
ref_qmodule = ref_qmodule_cls.from_float(float_module, wq_or_wq_dict) # type: ignore[attr-defined]
if fused_module is not None:
fused_module[0] = ref_qmodule # type: ignore[operator]
else:
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, ref_qmodule)
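# Hedged illustration (not part of the original file): a minimal sketch of the weight
# observation that convert_weighted_module performs for a plain nn.Linear, assuming the
# default fbgemm qconfig. qconfig.weight() instantiates the weight observer, calling it
# on the weight records statistics, and calculate_qparams() yields the scale/zero_point
# that later feed the reference quantized module via get_qparam_dict.
def _example_observe_linear_weight():
    import torch
    from torch.ao.quantization import get_default_qconfig
    qconfig = get_default_qconfig("fbgemm")
    linear = torch.nn.Linear(8, 8)
    weight_observer = qconfig.weight()  # per-channel observer for fbgemm weights
    weight_observer(linear.weight)      # observe the weight tensor
    scale, zero_point = weight_observer.calculate_qparams()
    return scale, zero_point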
def convert_custom_module(
node: Node,
graph: Graph,
modules: Dict[str, torch.nn.Module],
custom_module_class_mapping: Dict[QuantType, Dict[Type, Type]],
statically_quantized_custom_module_nodes: Set[Node]):
""" Converts an observed custom module to a quantized custom module based on
`custom_module_class_mapping`
For static quantization, we'll also remove the previous `dequantize` node and
attach the observer node for output to the module, the observer for the node
will be converted to a dequantize node instead of quantize-dequantize pairs
later in the graph. In the end we would have a quantized custom module that
has the same interface as a default quantized module in nn.quantized namespace,
i.e. quantized input and quantized output.
    Args:
      - node: the call_module node for the observed custom module
      - graph: the graph containing the node
      - modules: named modules of the original model
      - custom_module_class_mapping: mapping from observed custom module class to
        quantized custom module class, used to swap custom modules
      - statically_quantized_custom_module_nodes: a set that the custom module node is
        added to if we find it is statically quantized; it is used later when converting
        observers to quant/dequant node pairs: if the observed node is a statically
        quantized custom module node, we convert its observer to a dequantize node
        instead, to keep the interface the same as the default quantized module.
TODO: maybe we want to redesign this part to align with reference model design
as well, but there has been some discussions around the interface, so we can do
it later.
"""
observed_custom_module = modules[str(node.target)]
maybe_obs = maybe_get_observer_for_node(node, modules)
qconfig = observed_custom_module.qconfig
if activation_is_statically_quantized(qconfig):
statically_quantized_custom_module_nodes.add(node)
# remove the previous dequant node
prev_node = node.args[0]
# expecting the input node for a custom module node to be a Node
assert isinstance(prev_node, Node), \
f"Expecting the argument for custom module node to be a Node, but got {prev_node}"
if prev_node.op == "call_method" and prev_node.target == "dequantize":
# change the connection for custom module, we'll change the input
# of custom module node to quantize node:
            # Before: quantize - dequantize - custom_module
            # After:  quantize - custom_module
            #                \ - dequantize
node.replace_input_with(prev_node, prev_node.args[0])
# Remove the dequantize node if it doesn't have other users
if len(prev_node.users) == 0:
graph.erase_node(prev_node)
# absorb the following observer into the module conversion
activation_post_process = maybe_get_observer_for_node(node, modules)
assert activation_post_process is not None
observed_custom_module.activation_post_process = activation_post_process
# swap the observed custom module to quantized custom module
quantized_custom_module_class = get_swapped_custom_module_class(
observed_custom_module, custom_module_class_mapping, qconfig)
quantized_custom_module = \
quantized_custom_module_class.from_observed(observed_custom_module)
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, quantized_custom_module)
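# Hedged illustration (not part of the original file): the swap above only requires the
# quantized custom module class to expose a `from_observed` classmethod (see the call a
# few lines up); such a class is typically registered through
# ConvertCustomConfig.set_observed_to_quantized_mapping. A hypothetical class satisfying
# the protocol (the name and attributes are made up for illustration):
class _ExampleQuantizedCustomModule(torch.nn.Module):
    @classmethod
    def from_observed(cls, observed_module):
        # read the qparams recorded by the observer that convert attached to the
        # observed module, then build the quantized replacement from them
        mod = cls()
        mod.scale, mod.zero_point = \
            observed_module.activation_post_process.calculate_qparams()
        return mod
    def forward(self, x):
        # input and output stay quantized, matching the default quantized module interface
        return x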
def convert(
model: GraphModule, is_reference: bool = False,
convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
is_standalone_module: bool = False,
_remove_qconfig_flag: bool = True,
qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None) -> torch.nn.Module:
"""
    We convert an observed model (a module with observer calls) to a reference
    quantized model. The rules are simple:
    1. for each observer module call in the graph, we convert it to calls to
       quantize and dequantize functions based on the observer instance
    2. for weighted operations like linear/conv, we convert them to reference
       quantized modules; this requires knowing whether the dtype configured for the
       weight is supported in the backend, which is determined in the prepare step and
       stored in observed_node_names, so we can decide whether to swap the
       module based on that set
    standalone_module means it is a submodule that is not inlined in the
    parent module, and will be quantized separately as one unit.
    Returns a quantized standalone module; whether input/output is quantized is
    specified by prepare_custom_config, with
    input_quantized_idxs and output_quantized_idxs; please
    see the docs for prepare_fx for details
"""
if convert_custom_config is None:
convert_custom_config = ConvertCustomConfig()
if isinstance(convert_custom_config, Dict):
warnings.warn(
"Passing a convert_custom_config_dict to convert is deprecated and will not be supported "
"in a future version. Please pass in a ConvertCustomConfig instead.")
convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)
if isinstance(qconfig_mapping, Dict):
warnings.warn(
"Passing a QConfig dictionary to convert is deprecated and will not be supported "
"in a future version. Please pass in a QConfigMapping instead.")
qconfig_mapping = QConfigMapping.from_dict(qconfig_mapping) if qconfig_mapping else None
qconfig_mapping = copy.deepcopy(qconfig_mapping)
assert(qconfig_mapping is None or isinstance(qconfig_mapping, QConfigMapping))
if isinstance(backend_config, Dict):
warnings.warn(
"Passing a backend_config_dict to prepare is deprecated and will not be supported "
"in a future version. Please pass in a BackendConfig instead.")
backend_config = BackendConfig.from_dict(backend_config)
node_name_to_scope, prepare_custom_config, observed_node_names = restore_state(model)
qconfig_map: Dict[str, QConfigAny] = model._qconfig_map # type: ignore[assignment]
# mapping from fully qualified module name to module instance
# for example,
# {
# '': Model(...),
# 'linear': Linear(...),
# 'linear.weight_fake_quant': PerChannelMinMaxObserver(...),
# }
# We use remove_duplicate=False here because torch.cat uses
# the same activation_post_process module instance but different names
modules = dict(model.named_modules(remove_duplicate=False))
# TODO refactor this code once we update the prepare logic to have additional information on
# which graph nodes have been observed and share that with convert to decide which observers to ignore.
if qconfig_mapping:
prepare_qconfig_mapping: QConfigMapping = model._qconfig_mapping # type: ignore[assignment]
modules_copy = copy.deepcopy(modules)
if model._is_qat:
update_qconfig_for_qat(qconfig_mapping, {})
update_qconfig_for_fusion(model, qconfig_mapping)
compare_prepare_convert_qconfig_mappings(prepare_qconfig_mapping, qconfig_mapping) # type: ignore[arg-type]
convert_qconfig_map = generate_qconfig_map(model, modules_copy, model.graph, qconfig_mapping, node_name_to_scope)
# check the convert_qconfig_map generated and ensure that all the values either match what was set in prepare qconfig_map
# or are set to None in the convert_qconfig_map.
for k, v in qconfig_map.items():
assert k in convert_qconfig_map, 'Expected key {} in convert qconfig_map'.format(k)
if convert_qconfig_map[k] is not None:
assert qconfig_equals(v, convert_qconfig_map[k]), \
"Expected k {} to have the same value in prepare and convert QConfigMappings, " \
"but {} was updated to {}".format(k, v, convert_qconfig_map[k])
qconfig_map = convert_qconfig_map
custom_module_classes = get_custom_module_class_keys(convert_custom_config.observed_to_quantized_mapping)
custom_module_class_mapping = convert_custom_config.observed_to_quantized_mapping
if model._equalization_qconfig_map is not None:
# If we want to do equalization then do the following:
# Calculate the equalization scale, update the observers with the scaled
# inputs, and scale the weight
weight_eq_obs_dict = update_obs_for_equalization(model, modules)
convert_eq_obs(model, modules, weight_eq_obs_dict)
# always run weight observers in the top level forward method
# for dynamic quant ops or weight only quant ops
run_weight_observers(model)
graph_inputs: List[str] = []
for node in model.graph.nodes:
if node.op == 'placeholder':
graph_inputs.append(node.name)
# TODO: move this outside of this function
def replace_observer_with_quantize_dequantize_node(
model: torch.nn.Module,
graph: Graph,
node: Node,
modules: Dict[str, torch.nn.Module],
node_name_to_scope: Dict[str, Tuple[str, type]],
qconfig_map: Dict[str, QConfigAny]) -> None:
""" Replace activation_post_process module call node with quantize and
dequantize node
Before:
... -> observer_0(x) -> ...
After:
... -> torch.quantize_per_tensor(x, ...) -> x.dequantize() -> ...
"""
assert modules is not None
assert isinstance(node.target, str)
module_path, prefix = get_module_path_and_prefix(node, node_name_to_scope, qconfig_map)
observer_module = modules[node.target]
maybe_quantize_node_info = get_quantize_node_info(observer_module)
# Skip replacing observers to quant/dequant nodes if the qconfigs of all
# consumers and producers of this observer are None
skip_replacement = all([
has_none_qconfig(n, qconfig_map) for n in
list(node.args) + list(node.users.keys())])
if skip_replacement or maybe_quantize_node_info is None:
            # didn't find a corresponding quantize op and info for the observer_module,
            # so we just remove the observer
with graph.inserting_before(node):
node.replace_all_uses_with(node.args[0])
graph.erase_node(node)
else:
            # otherwise, we can convert the observer module call to quantize/dequantize nodes
node_type, quantize_op, qparams = maybe_quantize_node_info
# replace observer node with quant - dequant node
with graph.inserting_before(node):
input_node = node.args[0]
inputs = [input_node]
for key, value in qparams.items():
# TODO: we can add the information of whether a value needs to
# be registered as an attribute in qparams dict itself
if key in ['_scale_', '_zero_point_']:
# For scale and zero_point values we register them as buffers in the root module.
# TODO: maybe need more complex attr name here
qparam_node = create_getattr_from_value(model, graph, module_path + prefix + key, value)
inputs.append(qparam_node)
else:
# for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph.
inputs.append(value)
quantized_node = graph.create_node(node_type, quantize_op, tuple(inputs), {})
dequantized_node = graph.call_method("dequantize", args=(quantized_node,))
node.replace_all_uses_with(dequantized_node)
graph.erase_node(node)
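    # Hedged illustration (not part of the original file): for a per-tensor affine
    # activation observer the replacement above effectively turns
    #     y = observer_0(x)
    # into
    #     q = torch.quantize_per_tensor(x, scale_attr, zero_point_attr, torch.quint8)
    #     y = q.dequantize()
    # where scale_attr / zero_point_attr are get_attr nodes pointing at buffers that
    # create_getattr_from_value registered on the root module.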
# this is a temporary hack for custom module, we may want to implement
# this properly after the custom module class design is finalized
def replace_observer_with_dequantize_node(node: Node, graph: Graph):
call_custom_module_node = node.args[0]
        assert isinstance(call_custom_module_node, Node), \
            f"Expecting the input to the observer to be a custom module call node, but got {call_custom_module_node}"
node.replace_all_uses_with(call_custom_module_node)
graph.erase_node(node)
insert_dequantize_node(call_custom_module_node, graph)
# additional state to override inputs to be quantized, if specified
# by the user
placeholder_node_seen_cnt = 0
input_quantized_idxs: List[int] = prepare_custom_config.input_quantized_indexes
output_quantized_idxs: List[int] = prepare_custom_config.output_quantized_indexes
if backend_config is None:
backend_config = get_native_backend_config()
root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config)
# convert tuples so that it can work with isinstance(module, tuple_of_classes)
root_module_classes = tuple(root_module_to_quantized_reference_module.keys())
qat_module_classes = get_qat_module_classes(backend_config)
fused_module_classes = get_fused_module_classes(backend_config)
statically_quantized_custom_module_nodes: Set[Node] = set()
for node in list(model.graph.nodes):
if node.op == 'placeholder':
cur_placeholder_node_idx = placeholder_node_seen_cnt
placeholder_node_seen_cnt += 1
if cur_placeholder_node_idx in input_quantized_idxs:
                # Inputs are assumed to be quantized if the user specified the
# input_quantized_idxs override.
# we need to dequantize the inputs since all operators took
# floating point inputs in reference quantized models
insert_dequantize_node(node, model.graph)
elif node.op == "output":
# If the argument is empty we don't need to do anything
if len(output_quantized_idxs) == 0:
continue
# Result are kept quantized if the user specified the
# output_quantized_idxs override.
# Remove the dequantize operator for the node in the end if any
return_node = node
output = node.args[0]
# outputs can be Node, list, tuple, dict, other cases are not supported yet
if isinstance(output, (list, tuple)):
for idx in output_quantized_idxs:
maybe_recursive_remove_dequantize(output[idx], return_node, model.graph)
elif isinstance(output, (Node, dict)):
# we treat dict as a single argument currently, but it can be extended
# to support {"key": dtype} after we change output_quantized_idxs to
# dict
if 0 in output_quantized_idxs:
maybe_recursive_remove_dequantize(output, return_node, model.graph)
else:
warnings.warn(f"Unsupported node type for output_quantized_idxs: {type(output)}")
elif node.op == "call_module":
if is_activation_post_process(modules[node.target]):
observed_node = node.args[0]
if observed_node in statically_quantized_custom_module_nodes:
replace_observer_with_dequantize_node(node, model.graph)
else:
replace_observer_with_quantize_dequantize_node(
model, model.graph, node, modules, node_name_to_scope,
qconfig_map)
elif is_observed_standalone_module(modules[node.target]):
convert_standalone_module(
node, modules, model, is_reference, backend_config)
# below this point `type_before_parametrizations` is used
# instead of `type` to handle situations with fx quant + sparsity
elif type_before_parametrizations(modules[node.target]) in set(
root_module_classes).union(qat_module_classes).union(fused_module_classes):
# extra check for fused module classes to make sure they are fused module classes
# of target modules
if type_before_parametrizations(modules[node.target]) in fused_module_classes and \
type_before_parametrizations(modules[node.target][0]) not in root_module_classes:
continue
convert_weighted_module(
node, modules, observed_node_names, qconfig_map, backend_config)
elif type_before_parametrizations(modules[node.target]) in custom_module_classes:
convert_custom_module(
node, model.graph, modules, custom_module_class_mapping,
statically_quantized_custom_module_nodes)
preserved_attributes = set(convert_custom_config.preserved_attributes)
model = QuantizedGraphModule(model, copy.deepcopy(model.graph), preserved_attributes)
# remove deadcode after converting observers to quant/dequant ops
model.graph.eliminate_dead_code()
model.recompile()
# TODO: maybe move this to quantize_fx.py
if not is_reference:
model = duplicate_dequantize_node(model)
model = duplicate_quantize_dynamic_node(model)
model = lower_to_fbgemm(model, qconfig_map, node_name_to_scope)
model = remove_quant_dequant_pairs(model)
model = remove_extra_dequantize(model)
# TODO: this looks hacky, we want to check why we need this and see if we can
# remove this
# removes qconfig and activation_post_process modules
if _remove_qconfig_flag:
_remove_qconfig(model)
return model
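# Hedged illustration (not part of the original file): convert() is normally reached
# through the public FX graph mode quantization entry points rather than called directly.
# A minimal post-training static quantization sketch, assuming an fbgemm-capable host;
# the toy model and example inputs are placeholders:
def _example_prepare_and_convert():
    import torch
    from torch.ao.quantization import QConfigMapping, get_default_qconfig
    from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
    model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU()).eval()
    example_inputs = (torch.randn(1, 8),)
    qconfig_mapping = QConfigMapping().set_global(get_default_qconfig("fbgemm"))
    prepared = prepare_fx(model, qconfig_mapping, example_inputs)  # inserts observers
    prepared(*example_inputs)                                      # calibration pass
    quantized = convert_fx(prepared)                               # runs convert() above
    return quantized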
|
pytorch-master
|
torch/ao/quantization/fx/convert.py
|