python_code (stringlengths 0–1.02M) | repo_name (stringlengths 9–48) | file_path (stringlengths 5–114)
---|---|---|
# Module for defining "primitive" operations executable by nvFuser. This
# list exists to decouple the main set of primitives from the ones that provide
# a lowering of the op to nvFuser's Python interface. torch.ops.nvprims is mostly
# a subset of the primitives in torch.ops.prims, but additional primitives
# can be added in the future for the corresponding higher-level torch/aten
# functions.
from typing import Any, Dict
import torch
from torch._prims_common import (
DimsSequenceType,
getnvFuserDtype,
ShapeType,
TensorLikeType,
)
from torch._prims_common.wrappers import backwards_not_supported
nvprim_namespace = "nvprims"
nvprim = torch.library.Library(nvprim_namespace, "DEF")
nvprim_impl = torch.library.Library(
nvprim_namespace, "IMPL", "CompositeExplicitAutograd"
)
nvprim_autograd_impl = torch.library.Library(nvprim_namespace, "IMPL", "Autograd")
nvprim_meta_impl = torch.library.Library(nvprim_namespace, "IMPL", "Meta")
nvprim_names = [
"abs",
"acos",
"asin",
"atan",
"atanh",
"cos",
"cosh",
"bitwise_not",
"ceil",
"erf",
"erfc",
"exp",
"expm1",
"floor",
"imag",
"isfinite",
"lgamma",
"log",
"log1p",
"log2",
"log10",
"real",
"reciprocal",
"neg",
"round",
"rsqrt",
"sign",
"sin",
"sinh",
"sqrt",
"tan",
"tanh",
"trunc",
"add",
"atan2",
"bitwise_and",
"bitwise_or",
"bitwise_xor",
"div",
"eq",
"fmod",
"ge",
"gt",
"le",
"lt",
"mul",
"ne",
"pow",
"remainder",
"sub",
"broadcast_in_dim",
"where",
"convert_element_type",
"sum",
"var",
"amax",
"amin",
]
_nvfuser_impls: Dict[str, Any] = {}
_nvfuser_unary_ops = {
"abs",
"acos",
"asin",
"atan",
"atanh",
"cos",
"cosh",
"bitwise_not",
"ceil",
"erf",
"erfc",
"exp",
"expm1",
"floor",
"imag",
"isfinite",
"lgamma",
"log",
"log1p",
"log2",
"log10",
"reciprocal",
"neg",
"real",
"round",
"rsqrt",
"sign",
"sin",
"sinh",
"sqrt",
"tan",
"tanh",
"trunc",
}
def _assert_nvfuser_op_exists(fname: str):
try:
from torch._C._nvfuser import FusionDefinition as fd # type: ignore[import]
assert getattr(fd.Operators, fname)
except ImportError:
# Not all PyTorch builds have nvfuser
pass
for fname in _nvfuser_unary_ops:
exec(
f"""
# Ensure that the nvfuser implementation exists
_assert_nvfuser_op_exists("{fname}")
def _{fname}_nvfuser(fd, a):
return fd.ops.{fname}(a) # type: ignore[attr-defined]
_nvfuser_impls["{fname}"] = _{fname}_nvfuser
"""
)
_nvfuser_binary_ops = {
"add",
"atan2",
"bitwise_and",
"bitwise_or",
"bitwise_xor",
"div",
"eq",
"fmod",
"ge",
"gt",
"le",
"lt",
"mul",
"ne",
"pow",
"remainder",
"sub",
}
for fname in _nvfuser_binary_ops:
exec(
f"""
# Ensure that the nvfuser implementation exists
_assert_nvfuser_op_exists("{fname}")
def _{fname}_nvfuser(fd, a, b):
return fd.ops.{fname}(a, b) # type: ignore[attr-defined]
_nvfuser_impls["{fname}"] = _{fname}_nvfuser
"""
)
_nvfuser_ternary_ops = {
"where",
}
for fname in _nvfuser_ternary_ops:
exec(
f"""
# Ensure that the nvfuser implementation exists
_assert_nvfuser_op_exists("{fname}")
def _{fname}_nvfuser(fd, a, b, c):
return fd.ops.{fname}(a, b, c) # type: ignore[attr-defined]
_nvfuser_impls["{fname}"] = _{fname}_nvfuser
"""
)
def _broadcast_in_dim_nvfuser(
fd: Any,
a: TensorLikeType,
shape: ShapeType,
broadcast_dimensions: ShapeType,
):
return fd.ops.broadcast_in_dim(a, shape, broadcast_dimensions) # type: ignore[attr-defined]
def _convert_element_type_nvfuser(fd: Any, a: TensorLikeType, dtype: torch.dtype):
nvfuser_dtype = getnvFuserDtype(dtype)
return fd.ops.cast(a, nvfuser_dtype) # type: ignore[attr-defined]
def _sum_nvfuser(
fd: Any,
a: TensorLikeType,
dims: DimsSequenceType,
):
keep_dims = False
output_dtype = torch._C._nvfuser.DataType.Null
return fd.ops.sum(a, dims, keep_dims, output_dtype)
def _var_nvfuser(
fd: Any,
a: TensorLikeType,
dims: DimsSequenceType,
*,
correction: int,
):
keep_dims = False
return fd.ops.var(a, dims, correction, keep_dims)
def _amax_nvfuser(
fd: Any,
a: TensorLikeType,
dims: DimsSequenceType,
):
keep_dims = False
return fd.ops.max(a, dims, keep_dims)
def _amin_nvfuser(
fd: Any,
a: TensorLikeType,
dims: DimsSequenceType,
):
keep_dims = False
return fd.ops.min(a, dims, keep_dims)
_nvfuser_impls["broadcast_in_dim"] = _broadcast_in_dim_nvfuser
_nvfuser_impls["convert_element_type"] = _convert_element_type_nvfuser
_nvfuser_impls["sum"] = _sum_nvfuser
_nvfuser_impls["var"] = _var_nvfuser
_nvfuser_impls["amax"] = _amax_nvfuser
_nvfuser_impls["amin"] = _amin_nvfuser
def register_nvprims():
"""Registers all nvFuser primitives in the torch.ops.nvprims module."""
for name in nvprim_names:
main_prim = getattr(torch.ops.prims, name)
nvprim.define(main_prim.schema)
nvprim_impl.impl(name, main_prim.prim_impl)
nvprim_meta_impl.impl(name, main_prim.prim_meta_impl)
prim_packet = getattr(torch.ops.nvprims, name)
prim = prim_packet.default
nvprim_autograd_impl.impl(name, backwards_not_supported(prim))
for p in (prim_packet, prim):
p.__doc__ = main_prim.__doc__
p.impl_nvfuser = _nvfuser_impls[name]
p.return_type = main_prim.return_type # type: ignore[attr-defined]
|
pytorch-master
|
torch/_prims/nvfuser_prims.py
|
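# A minimal, hedged sketch (not part of the file above) of the torch.library
# registration pattern that nvfuser_prims.py relies on: define a schema in a
# custom namespace, then register an implementation under a dispatch key. The
# "mylib"/"twice" names are hypothetical and exist only for illustration.
import torch

_sketch_def = torch.library.Library("mylib", "DEF")
_sketch_def.define("twice(Tensor self) -> Tensor")

_sketch_impl = torch.library.Library("mylib", "IMPL", "CompositeExplicitAutograd")

def _twice_impl(self):
    # Reference implementation dispatched for all backends under this key.
    return self * 2

_sketch_impl.impl("twice", _twice_impl)

t = torch.arange(4.0)
assert torch.equal(torch.ops.mylib.twice(t), t * 2)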
from typing import Callable
from torch._prims.context import NvfuserPrimsMode, TorchRefsMode
from torch._prims.nvfuser_executor import nvfuser_execute, nvfuser_execute_partitioned
from torch.fx import GraphModule
from torch.fx.experimental.proxy_tensor import make_fx, wrapper_and_args_for_make_fx
def execute(gm: GraphModule, *args, executor: str = "aten"):
"""
Prototype ATen executor.
Just executes the context's graph.
"""
if executor == "aten":
return gm.forward(*args)
elif executor == "nvfuser":
return nvfuser_execute_partitioned(gm, *args)
elif executor == "strictly_nvfuser":
return nvfuser_execute(gm, *args)
msg = "Received unexpected value for 'executor': {0}. Allowed values are: aten, nvfuser.".format(
executor
)
raise ValueError(msg)
def make_traced(fn: Callable):
"""
Returns a function that, when called, will
trace its torch operations to prims and then
execute those prims on the requested trace executor
(possibly lowering them to that trace executor first).
Only supports the torch operations defined in _torch_to_reference_map
in context.py and operations with positional args. All args must
be tensors.
In the near future all these restrictions will be lifted.
Example usage:
def foo(a, b):
return torch.add(a, b)
traced_foo = make_traced(foo)
a = torch.randn((1, 2, 3, 4, 5), device='cuda')
b = torch.randn((1, 2, 3, 4, 5), device='cuda')
result = traced_foo(a, b, executor='nvfuser')
Executor may be either 'aten' or 'nvfuser'.
"""
def _traced(*args, executor="aten", **kwargs):
# TODO: caching
wrapped, all_args = wrapper_and_args_for_make_fx(fn, args, kwargs)
with NvfuserPrimsMode(), TorchRefsMode():
gm = make_fx(wrapped)(all_args)
return execute(gm, all_args, executor=executor)
return _traced
|
pytorch-master
|
torch/_prims/executor.py
|
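# Hedged usage sketch for the executor above, mirroring the make_traced
# docstring; it assumes a CUDA build with the prims/nvfuser stack available.
import torch
from torch._prims.executor import make_traced

def add_relu(a, b):
    return torch.relu(torch.add(a, b))

traced_add_relu = make_traced(add_relu)
a = torch.randn(4, 4, device="cuda")
b = torch.randn(4, 4, device="cuda")
out_aten = traced_add_relu(a, b, executor="aten")
out_nvfuser = traced_add_relu(a, b, executor="nvfuser")
assert torch.allclose(out_aten, out_nvfuser)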
# this is imported by test_deploy to do some checks in python
import sys
import subprocess
from pathlib import Path
# we've taken steps to clear out the embedded python environment,
# so we have to go searching for real python to figure out where its libraries are installed.
def python_path(cpath):
for maybe in cpath.split(':'):
candidate = Path(maybe) / "python"
if candidate.exists():
cmd = [str(candidate), '-c', 'import sys; print(":".join(sys.path))']
return subprocess.check_output(cmd).decode('utf-8').strip('\n').split(':')
raise RuntimeError('could not find real python')
def setup(path):
sys.path.extend(python_path(path))
sys.path.append('build/lib') # for our test python extension
# smoke test the numpy extension loading works
def numpy_test(x):
import numpy as np
xs = [np.array([x, x]), np.array([x, x])]
for i in range(10):
xs.append(xs[-1] + xs[-2])
return int(xs[-1][0])
|
pytorch-master
|
torch/csrc/deploy/test_deploy_python.py
|
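# Hedged sketch of the core trick used by python_path above: ask an external
# interpreter for its sys.path via a one-liner subprocess. Assumes a "python3"
# binary is on PATH (the helper above searches a ':'-separated path instead).
import subprocess

cmd = ["python3", "-c", 'import sys; print(":".join(sys.path))']
entries = subprocess.check_output(cmd).decode("utf-8").strip("\n").split(":")
print(entries[:3])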
from libfb.py import testutil
import test_deploy_python_ext
class TestDeployFromPython(testutil.BaseFacebookTestCase):
def test_deploy_from_python(self):
self.assertTrue(test_deploy_python_ext.run())
|
pytorch-master
|
torch/csrc/deploy/test_deploy_from_python.py
|
import numpy as np
import scipy
from scipy import linalg
print("Hello, torch::deploy unity!")
print(f"np.random.rand(5): {np.random.rand(5)}")
print(f"scipy {scipy}")
mat_a = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 2, 1, 0], [1, 3, 3, 1]])
mat_b = linalg.inv(mat_a)
print(mat_b)
|
pytorch-master
|
torch/csrc/deploy/unity/example.py
|
import torch
from torch import nn
class SimpleModel(nn.Module):
def __init__(self):
super(SimpleModel, self).__init__()
self.fc = nn.Linear(256, 64)
self.fc2 = nn.Linear(64, 10)
def forward(self, X):
X = self.fc(X)
X = torch.relu(X)
X = self.fc2(X)
X = torch.softmax(X, dim=-1)
return X
|
pytorch-master
|
torch/csrc/deploy/unity/tests/simple_model.py
|
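# Hedged sketch: a quick shape check of the SimpleModel defined above
# (assumes the class is in scope); 256 features in, 10 softmax outputs per row.
import torch

model = SimpleModel()
x = torch.randn(8, 256)
probs = model(x)
assert probs.shape == (8, 10)
assert torch.allclose(probs.sum(dim=-1), torch.ones(8), atol=1e-5)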
def func(*vlist):
return sum(vlist)
import sys
print("byebye!", file=sys.stderr)
|
pytorch-master
|
torch/csrc/deploy/unity/tests/sum.py
|
# used by the benchmarking program to wrap cpu models for GPU use
import torch
from copy import deepcopy
def to_device(i, d):
if isinstance(i, torch.Tensor):
return i.to(device=d)
elif isinstance(i, (tuple, list)):
return tuple(to_device(e, d) for e in i)
else:
raise RuntimeError('inputs are weird')
class GPUWrapper(torch.nn.Module):
def __init__(self, root):
super().__init__()
self.models = []
self.streams = {}
for i in range(torch.cuda.device_count()):
m = deepcopy(root) if i != 0 else root
d = f'cuda:{i}'
m.to(device=d)
self.models.append((m, d))
def __getstate__(self):
return self.models
def __setstate__(self, models):
super().__init__()
self.models = models
self.streams = {}
for m, d in models:
torch.cuda.synchronize(d)
# roi_align, 2210 count, ROIAlign_cuda.cu: add threadsync: problem goes away, return rand problem goes away,
# use different streams here, problem goes away.
def forward(self, tid, *args):
m, d = self.models[tid % len(self.models)]
if tid not in self.streams:
self.streams[tid] = torch.cuda.Stream(d)
s = self.streams[tid]
with torch.cuda.stream(s):
iput = to_device(args, d)
r = to_device(m(*iput), 'cpu')
return r
if __name__ == '__main__':
def check_close(a, b):
if isinstance(a, (list, tuple)):
for ae, be in zip(a, b):
check_close(ae, be)
else:
print(torch.max(torch.abs(a - b)))
assert torch.allclose(a, b)
import sys
from torch.package import PackageImporter
i = PackageImporter(sys.argv[1])
torch.version.interp = 0
model = i.load_pickle('model', 'model.pkl')
eg = i.load_pickle('model', 'example.pkl')
r = model(*eg)
gpu_model = GPUWrapper(model)
r2 = gpu_model(*eg)
check_close(r, r2)
|
pytorch-master
|
torch/csrc/deploy/example/gpu_wrapper.py
|
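# Hedged usage sketch for GPUWrapper above: wrap a small CPU model, then call
# it with a thread id that selects the device replica and CUDA stream.
# Assumes the class above is in scope and at least one CUDA device is present.
import torch

net = torch.nn.Linear(8, 2)
wrapped = GPUWrapper(net)
x = torch.randn(4, 8)
out0 = wrapped(0, x)   # runs on the replica for cuda:0, result copied back to cpu
out1 = wrapped(1, x)   # a different tid gets its own stream (and device, if any)
assert out0.device.type == "cpu"
assert torch.allclose(out0, out1, atol=1e-5)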
from typing import List, Any
import pickle
import torch
class TestTRTModule(torch.nn.Module):
def __init__(self, engine, input_names=None, output_names=None, fp16_output=False):
super(TestTRTModule, self).__init__()
self.engine = engine
self.input_names = input_names
self.output_names = output_names
# Indicate output is in fp16
self.fp16_output = fp16_output
def forward(self, *inputs):
batch_size = inputs[0].shape[0]
contiguous_inputs: List[torch.Tensor] = [i.contiguous() for i in inputs]
bindings: List[Any] = [None] * (len(self.input_names) + len(self.output_names))
# create output tensors
outputs: List[torch.Tensor] = []
for _, output_name in enumerate(self.output_names):
idx: int = self.engine.get_binding_index(output_name)
shape = (batch_size,) + tuple(self.engine.get_binding_shape(idx))
output = torch.empty(size=shape, dtype=torch.float32, device="cuda")
outputs.append(output)
bindings[idx] = output.data_ptr()
for i, input_name in enumerate(self.input_names):
idx = self.engine.get_binding_index(input_name)
bindings[idx] = contiguous_inputs[i].data_ptr()
context = self.engine.create_execution_context()
context.execute_async(
batch_size, bindings, torch.cuda.current_stream().cuda_stream
)
if len(outputs) == 1:
return outputs[0]
return tuple(outputs)
def make_trt_module():
import tensorrt as trt
logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
network = builder.create_network()
x = network.add_input("x", shape=(1, 2, 3), dtype=trt.float32)
layer = network.add_elementwise(x, x, trt.ElementWiseOperation.SUM)
layer.name = "add"
output = layer.get_output(0)
output.name = "output"
network.mark_output(output)
output.dtype = trt.float32
builder.max_batch_size = 1024
builder_config = builder.create_builder_config()
builder_config.max_workspace_size = 1 << 25
# Test engine can be serialized and loaded correctly.
serialized_engine = pickle.dumps(builder.build_engine(network, builder_config))
return TestTRTModule(pickle.loads(serialized_engine), ["x"], ["output"])
|
pytorch-master
|
torch/csrc/deploy/example/tensorrt_example.py
|
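# Hedged usage sketch for make_trt_module above. Requires TensorRT plus a CUDA
# device; the engine built above computes x + x for inputs shaped (1, 2, 3)
# per sample, so a batch of inputs should come back roughly doubled.
import torch

trt_mod = make_trt_module()          # assumes the helper above is in scope
x = torch.randn(4, 1, 2, 3, device="cuda")
y = trt_mod(x)
assert torch.allclose(y, 2 * x, atol=1e-3)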
from typing import Tuple, List, Dict
import torch
import torch.nn as nn
from torch import Tensor
class Simple(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
output = self.weight + input
return output
def load_library():
torch.ops.load_library("my_so.so")
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
super(ResNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18():
return ResNet(BasicBlock, [2, 2, 2, 2])
class BatchedModel(nn.Module):
def forward(self, input1: Tensor, input2: Tensor) -> Tuple[Tensor, Tensor]:
return (input1 * -1, input2 * -1)
def make_prediction(
self, input: List[Tuple[Tensor, Tensor]]
) -> List[Tuple[Tensor, Tensor]]:
return [self.forward(i[0], i[1]) for i in input]
def make_batch(
self, mega_batch: List[Tuple[Tensor, Tensor, int]], goals: Dict[str, str]
) -> List[List[Tuple[Tensor, Tensor, int]]]:
max_bs = int(goals["max_bs"])
return [
mega_batch[start_idx : start_idx + max_bs]
for start_idx in range(0, len(mega_batch), max_bs)
]
class MultiReturn(torch.nn.Module):
def __init__(self):
super(MultiReturn, self).__init__()
def forward(self, t: Tuple[Tensor, Tensor]) -> Tuple[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor]]:
a, b = t
result = ((a.masked_fill_(b, 0.1), b), (torch.ones_like(a), b))
return result
multi_return_metadata = r"""
{
"metadata_container": {
"forward": {
"named_input_metadata": {
"t": {
"argument_type": {
"tuple": {
"tuple_elements": [
{
"tensor": 1
},
{
"tensor": 6
}
]
}
},
"optional_argument": false,
"metadata": {
"dense_features": {
"feature_desc": [
{
"feature_name": "test_feature_1",
"feature_id": 1
}
],
"expected_shape": {
"dims": [
-1,
1
],
"unknown_rank": false
},
"data_type": 1,
"feature_store_feature_type": 0
}
}
}
},
"positional_output_metadata": [
{
"argument_type": {
"tuple": {
"tuple_elements": [
{
"tensor": 1
},
{
"tensor": 6
}
]
}
},
"optional_argument": false,
"metadata": {
"dense_features": {
"feature_desc": [
{
"feature_name": "test_feature_1",
"feature_id": 1
}
],
"expected_shape": {
"dims": [
-1,
1
],
"unknown_rank": false
},
"data_type": 1,
"feature_store_feature_type": 0
}
}
},
{
"argument_type": {
"tuple": {
"tuple_elements": [
{
"tensor": 1
},
{
"tensor": 6
}
]
}
},
"optional_argument": false,
"metadata": {
"dense_features": {
"feature_desc": [
{
"feature_name": "test_feature_3",
"feature_id": 3
}
],
"expected_shape": {
"dims": [
-1,
1
],
"unknown_rank": false
},
"data_type": 1,
"feature_store_feature_type": 0
}
}
}
]
}
}
}
"""
|
pytorch-master
|
torch/csrc/deploy/example/examples.py
|
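# Hedged sketch: make_batch on BatchedModel above simply chunks a mega-batch
# into pieces of at most max_bs requests; a quick check of that behaviour.
# Assumes the class above is in scope.
import torch

m = BatchedModel()
mega_batch = [(torch.zeros(1), torch.ones(1), i) for i in range(10)]
chunks = m.make_batch(mega_batch, {"max_bs": "4"})
assert [len(c) for c in chunks] == [4, 4, 2]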
"""
Generate the example files that torchpy_test uses.
"""
import argparse
from pathlib import Path
import torch
from torch.package import PackageExporter
from torch.fx import symbolic_trace
try:
from .examples import Simple, resnet18, MultiReturn, multi_return_metadata, load_library, BatchedModel
except ImportError:
from examples import Simple, resnet18, MultiReturn, multi_return_metadata, load_library, BatchedModel
try:
from .fx.examples import SimpleWithLeaf
except ImportError:
from fx.examples import SimpleWithLeaf
try:
from .tensorrt_example import make_trt_module
except ImportError:
from tensorrt_example import make_trt_module
def generate_fx_example():
name = 'simple_leaf'
model = SimpleWithLeaf(5, 10)
graph_module : torch.fx.GraphModule = symbolic_trace(model)
with PackageExporter(str(p / (name + "_fx"))) as e:
e.intern("**")
e.save_pickle("model", "model.pkl", graph_module)
model_jit = torch.jit.script(model)
model_jit.save(str(p / (name + "_jit")))
def save(name, model, model_jit=None, eg=None, featurestore_meta=None):
with PackageExporter(str(p / name)) as e:
e.mock("iopath.**")
e.intern("**")
e.save_pickle("model", "model.pkl", model)
if eg:
e.save_pickle("model", "example.pkl", eg)
if featurestore_meta:
# TODO(whc) can this name come from buck somehow,
# so it's consistent with predictor_config_constants::METADATA_FILE_NAME()?
e.save_text("extra_files", "metadata.json", featurestore_meta)
if model_jit:
model_jit.save(str(p / (name + "_jit")))
parser = argparse.ArgumentParser(description="Generate Examples")
parser.add_argument("--install_dir", help="Root directory for all output files")
if __name__ == "__main__":
args = parser.parse_args()
if args.install_dir is None:
p = Path(__file__).parent / "generated"
p.mkdir(exist_ok=True)
else:
p = Path(args.install_dir)
resnet = resnet18()
resnet.eval()
resnet_eg = torch.rand(1, 3, 224, 224)
resnet_traced = torch.jit.trace(resnet, resnet_eg)
save("resnet", resnet, resnet_traced, (resnet_eg,))
simple = Simple(10, 20)
save("simple", simple, torch.jit.script(simple), (torch.rand(10, 20),))
multi_return = MultiReturn()
save("multi_return", multi_return, torch.jit.script(multi_return), (torch.rand(10, 20),), multi_return_metadata)
# used for torch deploy/package tests in predictor
batched_model = BatchedModel()
save("batched_model", batched_model)
with PackageExporter(str(p / "load_library")) as e:
e.mock("iopath.**")
e.intern("**")
e.save_pickle("fn", "fn.pkl", load_library)
generate_fx_example()
with PackageExporter(p / "uses_distributed") as e:
e.save_source_string("uses_distributed", "import torch.distributed; assert torch.distributed.is_available()")
with PackageExporter(str(p / "make_trt_module")) as e:
e.extern("tensorrt")
e.add_dependency("tensorrt")
e.mock("iopath.**")
e.intern("**")
e.save_pickle("make_trt_module", "model.pkl", make_trt_module)
|
pytorch-master
|
torch/csrc/deploy/example/generate_examples.py
|
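# Hedged sketch: loading one of the packages produced by the script above, the
# same way gpu_wrapper.py consumes them. "generated/simple" is the default
# output location when --install_dir is not given.
import torch
from torch.package import PackageImporter

importer = PackageImporter("generated/simple")
model = importer.load_pickle("model", "model.pkl")
example = importer.load_pickle("model", "example.pkl")
print(model(*example).shape)   # Simple(10, 20) adds its weight to a (10, 20) input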
import torch.fx
try:
from .some_dependency import a_non_torch_leaf
except ImportError:
from some_dependency import a_non_torch_leaf
torch.fx.wrap('a_non_torch_leaf')
class SimpleWithLeaf(torch.nn.Module):
def __init__(self, N, M):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(N, M))
def forward(self, input):
output = self.weight + a_non_torch_leaf(1, input)
return output
|
pytorch-master
|
torch/csrc/deploy/example/fx/examples.py
|
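# Hedged sketch: tracing SimpleWithLeaf above to show the effect of
# torch.fx.wrap: a_non_torch_leaf stays a single call_function node instead of
# being traced through. Assumes both definitions above are in scope.
import torch
from torch.fx import symbolic_trace

gm = symbolic_trace(SimpleWithLeaf(3, 4))
leaf_calls = [n for n in gm.graph.nodes if n.op == "call_function" and n.target is a_non_torch_leaf]
assert len(leaf_calls) == 1
print(gm.code)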
# dependency for torch package
def a_non_torch_leaf(a: int, b):
return a * b
|
pytorch-master
|
torch/csrc/deploy/example/fx/some_dependency.py
|
#!/usr/bin/env python3
import argparse
from torchgen.gen import parse_native_yaml, FileManager
import torchgen.model as model
def num_leading_spaces(line: str) -> int:
return len(line) - len(line.lstrip())
def deindent(code: str) -> str:
lines = code.split('\n')
min_leading_spaces = min(map(num_leading_spaces, lines))
lines = [line[min_leading_spaces:] for line in lines]
return '\n'.join(lines)
def gen_external(native_functions_path, tags_path, external_path):
native_functions = parse_native_yaml(native_functions_path, tags_path)
func_decls = []
func_registrations = []
for func in native_functions:
schema = func.func
name = schema.name.name.base
args = schema.arguments
# Only supports extern calls for functions with out variants
if not schema.is_out_fn():
continue
# Doesn't currently support functions with more than one out parameter
if len(args.out) > 1:
continue
# Doesn't currently support kwarg arguments
if len(args.pre_tensor_options_kwarg_only) > 0 or len(args.post_tensor_options_kwarg_only) > 0:
continue
self_arg = [args.self_arg.argument] if args.self_arg is not None else []
args = list(args.pre_self_positional) + self_arg + list(args.post_self_positional)
tensor_args = [arg for arg in args if isinstance(arg.type, model.BaseType) and arg.type.name == model.BaseTy.Tensor]
if len(tensor_args) != len(args):
continue
arg_names = [None] * len(args)
tensor_decls = []
for idx, arg in enumerate(tensor_args):
s = f"const at::Tensor& {arg.name} = tensors[{idx + 1}];"
tensor_decls.append(s)
arg_names[idx] = arg.name
nl = '\n'
# print(tensor_decls, name, arg_names)
func_decl = f"""\
void nnc_aten_{name}(
int64_t bufs_num,
void** buf_data,
int64_t* buf_ranks,
int64_t* buf_dims,
int64_t* buf_strides,
int8_t* buf_dtypes,
int64_t args_num,
int64_t* extra_args) {{
std::vector<at::Tensor> tensors =
constructTensors(bufs_num, buf_data, buf_ranks, buf_dims, buf_strides, buf_dtypes);
at::Tensor& r = tensors[0];
{nl.join(tensor_decls)}
try {{
at::{name}_out({', '.join(['r'] + arg_names)});
}} catch (...) {{
}}
}}"""
func_registration = f"""\
const static RegisterNNCExternalFunction nnc_{name}(
"nnc_aten_{name}",
nnc_aten_{name});"""
func_decls.append(func_decl)
func_registrations.append(func_registration)
fm = FileManager(install_dir='.', template_dir='.', dry_run=False)
fm.write_with_template('external_functions_codegen.cpp', external_path,
lambda: {'external_registrations': func_registrations, 'external_functions': func_decls})
def main() -> None:
parser = argparse.ArgumentParser(
description='Generate annotated_fn_args script')
parser.add_argument('--native_functions',
help='path to native_functions.yaml',
default='../../../../aten/src/ATen/native/native_functions.yaml')
parser.add_argument('--tags',
help='path to tags.yaml',
default='../../../../aten/src/ATen/native/tags.yaml')
parser.add_argument('--template_path',
help='path to external_functions_codegen_template.cpp',
default='../../../../tools/jit/templates/external_functions_codegen_template.cpp')
args = parser.parse_args()
gen_external(args.native_functions, args.tags, args.template_path)
if __name__ == '__main__':
main()
|
pytorch-master
|
torch/csrc/jit/tensorexpr/codegen_external.py
|
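# Hedged sketch: what num_leading_spaces/deindent above do to a small snippet
# (assumes both helpers are in scope).
snippet = """    int a = 1;
    int b = 2;"""
assert num_leading_spaces("    int a = 1;") == 4
assert deindent(snippet) == "int a = 1;\nint b = 2;"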
import subprocess
import click
def test(cmd, limit):
print(f"Testing PYTORCH_JIT_OPT_LIMIT=tensorexpr_fuser={limit} {cmd}")
p = subprocess.run(
f"PYTORCH_JIT_OPT_LIMIT=tensorexpr_fuser={limit} {cmd}",
shell=True,
capture_output=True,
encoding="utf-8",
)
print(p.stdout)
f = "INTERNAL ASSERT FAILED"
if f in p.stdout or f in p.stderr:
print("skip")
return -1
if p.returncode == 0:
print("good")
return 1
print("bad")
return 0
@click.command()
@click.option("--cmd")
def bisect(cmd):
last_good = 0
first_bad = 10000
skips = set()
# Test if there are any unskipped commits in (last_good, first_bad)
def keep_going():
for limit in range(last_good + 1, first_bad):
if limit not in skips:
return True
return False
while keep_going():
test_limit = test_mid = (last_good + first_bad) // 2
val = -1
# Scan forward from mid towards bad.
while test_limit <= first_bad and val == -1:
val = test(cmd, test_limit)
if val == -1:
skips.add(test_limit)
test_limit = test_limit + 1
# If everything in [mid, bad] skipped, scan back towards good.
if val == -1:
test_limit = test_mid - 1
while test_limit >= last_good and val == -1:
val = test(cmd, test_limit)
if val == -1:
skips.add(test_limit)
test_limit = test_limit - 1
if val == 0:
first_bad = test_limit
elif val == 1:
last_good = test_limit
print(f"last good: {last_good}, first bad: {first_bad}")
if __name__ == "__main__":
bisect()
|
pytorch-master
|
torch/csrc/jit/tensorexpr/scripts/bisect.py
|
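# Hedged usage sketch for the bisect tool above: it binary-searches the value
# of PYTORCH_JIT_OPT_LIMIT=tensorexpr_fuser=<N> at which `cmd` starts failing,
# skipping runs that hit internal asserts. A hypothetical invocation:
#
#   python bisect.py --cmd "python test/test_jit_fuser_te.py -k test_relu"
#
# It prints "last good: <N>, first bad: <M>" once no untested, unskipped
# values remain between the two bounds.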
# Generates a C++ header file embedding the original input as a string literal
import argparse
import pathlib
from datetime import datetime
arg_parser = argparse.ArgumentParser(
description='Converts source files to C++ string literals', allow_abbrev=False)
arg_parser.add_argument('-i', '--input', required=True,
help='Input source file')
arg_parser.add_argument('-o', '--output', required=True,
help='Name of the generated header file')
args = arg_parser.parse_args()
# msvc string literal maximum length 16380
# https://docs.microsoft.com/en-us/cpp/error-messages/compiler-errors-1/compiler-error-c2026?view=msvc-170
MAX_STRING_LITERAL = 16000
with open(args.input, 'r') as fin:
with open(args.output, 'w') as fout:
literal_name = f'{pathlib.Path(args.input).stem}_cu'
fout.write(f'// Generated from "{args.input}"\n')
fout.write(f'// {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n\n')
fout.write('namespace nvfuser_resources {\n\n')
fout.write(f'constexpr const char* {literal_name} = R"(\n')
accumulated_chars = 0
for line in fin:
accumulated_chars = accumulated_chars + len(line) + 1
if accumulated_chars >= MAX_STRING_LITERAL:
fout.write(')"\n')
fout.write('R"(\n')
fout.write(line)
accumulated_chars = len(line) + 1
else:
fout.write(line)
fout.write(')";\n')
fout.write('\n} // namespace nvfuser_resources\n')
|
pytorch-master
|
torch/csrc/jit/codegen/cuda/tools/stringify_file.py
|
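# Hedged sketch: driving the generator above end to end. Assumes the file is
# saved locally as stringify_file.py (hypothetical layout) and "python" is on
# PATH; the output wraps the input in nvfuser_resources::demo_cu raw-string
# chunks split near the 16000-character MSVC limit.
import pathlib
import subprocess

pathlib.Path("demo.cu").write_text("__global__ void k() {}\n")
subprocess.check_call(["python", "stringify_file.py", "-i", "demo.cu", "-o", "demo.h"])
print(pathlib.Path("demo.h").read_text())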
import torch
from torch._C._nvfuser import Fusion, FusionDefinition, DataType
# Construct and Define Fusion
fusion = Fusion()
with FusionDefinition(fusion) as fd :
t0 = fd.define_tensor(2, DataType.Double)
t1 = fd.define_tensor(2, DataType.Double)
t0h = fd.ops.cast(t0, DataType.Half)
t1h = fd.ops.cast(t1, DataType.Half)
t2 = fd.ops.add(t0h, t1h)
t3 = fd.ops.relu(t2)
fd.add_output(t3)
fusion.print_ir()
# Execute Fusion
input1 = torch.ones(2, 4, device='cuda', dtype=torch.float64)
input2 = torch.ones(2, 4, device='cuda', dtype=torch.float64)
# Kernel compilation should be cached for the 2nd iteration
# with input tensors of the same shape
for _ in range(5) :
outputs = fusion.execute([input1, input2])
print(outputs[0])
|
pytorch-master
|
torch/csrc/jit/codegen/cuda/python_frontend/examples/double_half_cast.py
|
import torch
from torch._C._nvfuser import Fusion, FusionDefinition, DataType
# Construct and Define Fusion
fusion = Fusion()
with FusionDefinition(fusion) as fd :
t0 = fd.define_tensor(3, DataType.Half)
t1 = fd.define_tensor(1, DataType.Half)
s0 = fd.define_scalar()
c0 = fd.define_constant(3.0)
t2 = fd.ops.add(t0, t1)
t3 = fd.ops.mul(t2, c0)
t4 = fd.ops.mul(t3, s0)
t5 = fd.ops.relu(t4)
t6 = fd.ops.sum(t5, [-1], False, DataType.Float)
t7 = fd.ops.cast(t6, DataType.Half)
fd.add_output(t7)
fusion.print_ir()
# Execute Fusion
input1 = torch.ones(2, 4, 8, device='cuda', dtype=torch.float16)
input2 = torch.ones(8, device='cuda', dtype=torch.float16)
# Kernel compilation should be cached for the 2nd iteration
# with input tensors of the same shape
for _ in range(5) :
outputs = fusion.execute([input1, input2, 2.0])
print(outputs[0])
|
pytorch-master
|
torch/csrc/jit/codegen/cuda/python_frontend/examples/python_example_fp16.py
|
import torch
from torch._C._nvfuser import Fusion, FusionDefinition, DataType
# Construct and Define Fusion
fusion = Fusion()
with FusionDefinition(fusion) as fd :
t0 = fd.define_tensor(3)
t1 = fd.define_tensor(3)
s0 = fd.define_scalar()
c0 = fd.define_constant(3.0)
t2 = fd.ops.add(t0, t1)
t3 = fd.ops.mul(t2, c0)
t4 = fd.ops.atan2(t3, s0)
t5 = fd.ops.relu(t4)
t6 = fd.ops.sum(t5, [-1], False, DataType.Float)
t7 = fd.ops.isfinite(t6)
fd.add_output(t6)
fd.add_output(t7)
fusion.print_ir()
# Execute Fusion
input1 = torch.ones(2, 4, 8, device='cuda')
input2 = torch.ones(2, 4, 8, device='cuda')
# Kernel compilation should be cached for the 2nd iteration
# with input tensors of the same shape
for _ in range(5) :
outputs = fusion.execute([input1, input2, 2.0])
print(outputs[0])
print(outputs[1])
|
pytorch-master
|
torch/csrc/jit/codegen/cuda/python_frontend/examples/python_example.py
|
import torch
from torch._C._nvfuser import Fusion, FusionDefinition
import torch._prims as prims
import torch._refs as refs
# Construct and Define Fusion
fusion1 = Fusion()
with FusionDefinition(fusion1) as fd :
t0 = fd.define_tensor(1)
t1 = fd.define_tensor(3)
t0_b = fd.ops.broadcast_in_dim(t0, [2, 3, 4], [1])
t2 = fd.ops.add(t0_b, t1)
fd.add_output(t2)
fusion1.print_ir()
# Execute Fusion
input1 = torch.randn(3, device='cuda')
input2 = torch.randn(2, 3, 4, device='cuda')
# Kernel compilation should be cached for the 2nd iteration
# with input tensors of the same shape
for _ in range(5) :
o = fusion1.execute([input1, input2])[0]
assert(o.shape == torch.Size([2, 3, 4]))
# Reference in prim torch
ref_o = refs.add(prims.broadcast_in_dim(input1, [2, 3, 4], [1]), input2)
assert(ref_o.allclose(o))
assert(ref_o.shape == o.shape)
fusion2 = Fusion()
input1 = torch.randn(1, 1, 4, device='cuda')
input2 = torch.randn(2, 3, 4, device='cuda')
with FusionDefinition(fusion2) as fd :
t0 = fd.define_tensor(sizes=input1.size(), strides=input1.stride())
t1 = fd.define_tensor(sizes=input2.size(), strides=input2.stride())
t0_b = fd.ops.broadcast_in_dim(t0, [2, 3, 4], [0, 1, 2])
t2 = fd.ops.add(t0_b, t1)
fd.add_output(t2)
fusion2.print_ir()
# Kernel compilation should be cached for the 2nd iteration
# with input tensors of the same shape
for _ in range(5) :
o = fusion2.execute([input1, input2])[0]
assert(o.shape == torch.Size([2, 3, 4]))
# Reference in prim torch
ref_o = refs.add(prims.broadcast_in_dim(input1, [2, 3, 4], [0, 1, 2]), input2)
assert(ref_o.allclose(o))
assert(ref_o.shape == o.shape)
# Construct and Define Fusion
fusion3 = Fusion()
with FusionDefinition(fusion3) as fd :
# t0 = fd.define_tensor(2)
t0 = fd.define_tensor([3, 1], [1, 1])
t1 = fd.define_tensor(1)
t1_b = fd.ops.broadcast_in_dim(t1, [3, 3], [0]) # 1 -> 0
t2 = fd.ops.add(t0, t1_b)
fd.add_output(t2)
fusion3.print_ir()
# Execute Fusion
input1 = torch.randn(3, 1, device='cuda')
input2 = torch.randn(3, device='cuda')
# Kernel compilation should be cached for the 2nd iteration
# with input tensors of the same shape
for _ in range(5) :
o = fusion3.execute([input1, input2])[0]
assert(o.shape == torch.Size([3, 3]))
# Reference in prim torch
ref_o = refs.add(input1, prims.broadcast_in_dim(input2, [3, 3], [0]))
assert(ref_o.allclose(o))
assert(ref_o.shape == o.shape)
|
pytorch-master
|
torch/csrc/jit/codegen/cuda/python_frontend/examples/python_example_broadcast_in_dim.py
|
import torch
from torch._C._nvfuser import Fusion, FusionDefinition, DataType
# Construct and Define Fusion
fusion = Fusion()
with FusionDefinition(fusion) as fd :
t0 = fd.define_tensor(2, DataType.Half)
t1 = fd.define_tensor(2, DataType.Double)
t2 = fd.ops.add(t0, t1)
t5 = fd.ops.relu(t2)
fd.add_output(t5)
fusion.print_ir()
# Execute Fusion
input1 = torch.ones(2, 4, device='cuda', dtype=torch.float16)
input2 = torch.ones(2, 4, device='cuda', dtype=torch.float64)
# Kernel compilation should be cached for the 2nd iteration
# with input tensors of the same shape
for _ in range(5) :
outputs = fusion.execute([input1, input2])
print(outputs[0])
|
pytorch-master
|
torch/csrc/jit/codegen/cuda/python_frontend/examples/half_double_cast.py
|
import torch
import nvfuser_extension # noqa: F401
t = torch.randn((5, 5), device='cuda')
expected = torch.sinh(t)
output = torch.ops.myop.sinh_nvfuser(t)
print("Expected:", expected)
print("Output:", output)
assert torch.allclose(output, expected)
print("They match!")
|
pytorch-master
|
torch/csrc/jit/codegen/cuda/examples/sinh_extension/test.py
|
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='nvfuser_extension',
ext_modules=[
CUDAExtension(
name='nvfuser_extension',
pkg='nvfuser_extension',
sources=['main.cpp'])
],
cmdclass={
'build_ext': BuildExtension
})
|
pytorch-master
|
torch/csrc/jit/codegen/cuda/examples/sinh_extension/setup.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
import torch._lazy
import torch._lazy.ts_backend
import torch._lazy.metrics
torch._lazy.ts_backend.init()
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(log_interval, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad(set_to_none=True)
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
torch._lazy.mark_step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if __name__ == '__main__':
bsz = 64
device = 'lazy'
epochs = 14
log_interval = 10
lr = 1
gamma = 0.7
train_kwargs = {'batch_size': bsz}
# if we want to use CUDA
if "LTC_TS_CUDA" in os.environ:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True,
'batch_size': bsz}
train_kwargs.update(cuda_kwargs)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('./data', train=True, download=True,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=lr)
scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
for epoch in range(1, epochs + 1):
train(log_interval, model, device, train_loader, optimizer, epoch)
scheduler.step()
|
pytorch-master
|
torch/csrc/lazy/test_mnist.py
|
from torch._C._monitor import * # noqa: F403
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from torch.utils.tensorboard import SummaryWriter
STAT_EVENT = "torch.monitor.Stat"
class TensorboardEventHandler:
"""
TensorboardEventHandler is an event handler that will write known events to
the provided SummaryWriter.
This currently only supports ``torch.monitor.Stat`` events which are logged
as scalars.
>>> from torch.utils.tensorboard import SummaryWriter
>>> from torch.monitor import TensorboardEventHandler, register_event_handler
>>> writer = SummaryWriter("log_dir")
>>> register_event_handler(TensorboardEventHandler(writer))
"""
def __init__(self, writer: "SummaryWriter") -> None:
"""
Constructs the ``TensorboardEventHandler``.
"""
self._writer = writer
def __call__(self, event: Event) -> None:
if event.name == STAT_EVENT:
for k, v in event.data.items():
self._writer.add_scalar(k, v, walltime=event.timestamp.timestamp())
|
pytorch-master
|
torch/monitor/__init__.py
|
import weakref
import torch
from torch.multiprocessing.reductions import StorageWeakRef
from torch.utils._mode_utils import no_dispatch
def safe_is_leaf(t):
try:
return t.is_leaf
except RuntimeError:
# inference mode can trigger this
return False
# torch.Tensors cannot be used as a key in a dictionary
# because they define a custom __eq__ function which when used
# to resolve hash collisions will throw when comparing tensors:
# "RuntimeError: bool value of Tensor with more than one value is ambiguous."
# To avoid that, we use an object which will hold a Tensor and use
# its id for both hashing and equality.
# In order to use this as a weak key reference, we cannot
# simply use weakref.WeakKeyDictionary because the newly constructed
# WeakTensorRefKey's only use would be as a dictionary key, so it would have no strong
# references.
# To get around this issue, we can use it as a normal key, and then set
# `weakref.finalize` to delete the key when its contained tensor dies.
class WeakTensorRefKey(object):
def __init__(self, ten):
self.ten = weakref.ref(ten)
# store id since as soon as ten is deallocated
# the old id will no longer be recoverable, and
# we need to be able to remove the WeakTensorRefKey
# from the dictionary by hashing it to the same
# value it had when ten was alive
self.id = id(self.ten())
def __hash__(self):
return self.id
def __eq__(self, other):
if id(self) == id(other):
return True
return self.id == other.id
# This is a class for converting multiple tensors into meta tensors which
# share the same view/storage structure. The operation model is you allocate
# one of these, and then call it repeatedly on all the tensors you want to
# convert. It's important to use the same object for tensors you want to
# share storage because this is how we correlate shared storages to the same
# meta storages. This class will hold weak references to cached tensors
# and tensor storages.
class MetaConverter:
def __init__(self):
self.storage_memo = {}
self.tensor_memo = {}
self.maybe_storages_to_delete = []
self.check_expired_frequency = 128
self.check_expired_count = 0
self.hit = 0
self.miss = 0
self.del_hook = None
def successful(self):
return self.hit > 0 and self.miss == 0
def check_for_expired_weak_storages(self):
new_li = []
stor_to_delete = []
for obj in self.maybe_storages_to_delete:
if not obj.expired():
new_li.append(obj)
else:
stor_to_delete.append(obj)
for obj in stor_to_delete:
self.storage_memo.pop(obj, None)
self.maybe_storages_to_delete = new_li
# if for some reason we have acquired many storages which have not expired
# even though a tensor with their storage has expired (aliasing or otherwise)
# check for expired storages less often so as to bound the amount of work we
# do checking for expired storages
self.check_expired_frequency = max(
self.check_expired_frequency, len(self.maybe_storages_to_delete)
)
def get_tensor_memo(self, t):
return self.tensor_memo.get(WeakTensorRefKey(t), None)
def set_tensor_memo(self, t, v):
# hold a weak ref to self, otherwise it will be kept alive
# by the del_ten closure
self_weak_ref = weakref.ref(self)
if t.is_sparse:
weak_st = None
else:
weak_st = StorageWeakRef(t.storage())
tensor_ref_key = WeakTensorRefKey(t)
def del_ten():
# tensor outlives the converter
self_ref = self_weak_ref()
if self_ref is None:
return
# on shutdown, tensor_ref_key may not be in memo
self_ref.tensor_memo.pop(tensor_ref_key, None)
if weak_st and weak_st.expired():
self_ref.storage_memo.pop(weak_st, None)
elif weak_st is not None:
# [expired-storages]
# NB: even though the tensor has died,
# the deallocation of its storage can take longer,
# even when the storage has no other uses/views.
# In this case, the StorageWeakRef object will be kept alive
# longer than it needs to be, however the storage itself
# will be deallocated. We retain the possibly dead storages
# and periodically check if any of them are expired and
# can be freed.
self_ref.maybe_storages_to_delete.append(weak_st)
weakref.finalize(t, del_ten)
self.tensor_memo[tensor_ref_key] = v
# NB: doesn't actually return a storage, because meta storage is
# not supported
def meta_storage(self, s):
# NB: TypedStorage is freshly allocated and cannot be used as hash
# key index.
# Use a Weak Ref to s in order to not leak memory
swr = StorageWeakRef(s)
if swr not in self.storage_memo:
self.storage_memo[swr] = torch.empty(s.size(), dtype=s.dtype, device="meta")
return self.storage_memo[swr]
# This function assumes that it's possible to do the conversion
def meta_tensor(self, t):
# see expired-storages
self.check_expired_count += 1
if self.check_expired_count >= self.check_expired_frequency:
self.check_for_expired_weak_storages()
self.check_expired_count = 0
if self.get_tensor_memo(t) is None:
with torch.inference_mode(t.is_inference()):
if t.is_sparse:
is_leaf = safe_is_leaf(t)
r = torch.ops.aten._sparse_coo_tensor_with_dims(
t.sparse_dim(),
t.dense_dim(),
t.shape,
dtype=t.dtype,
layout=torch.sparse_coo,
device="meta",
)
r._coalesced_(t.is_coalesced())
if t.requires_grad:
r.requires_grad = True
if t.requires_grad and not is_leaf:
with torch.enable_grad():
r = r.clone()
r._coalesced_(t.is_coalesced())
elif t._is_view():
# Construct views in two steps: recursively meta-fy their
# base, and then create the view off that. NB: doing it
# directly from storage is WRONG because this won't cause
# version counters to get shared.
assert t._is_view()
base = self.meta_tensor(t._base)
def is_c_of_r(complex_dtype, real_dtype):
return (
utils.is_complex_dtype(complex_dtype)
and utils.corresponding_real_dtype(complex_dtype)
== real_dtype
)
if base.dtype == t.dtype:
pass
elif is_c_of_r(base.dtype, t.dtype):
base = torch.view_as_real(base)
elif is_c_of_r(t.dtype, base.dtype):
base = torch.view_as_complex(base)
else:
# This is not guaranteed to succeed. If it fails, it
# means there is another dtype-converting view function
# that hasn't been handled here
base = base.view(t.dtype)
with torch.enable_grad():
r = base.as_strided(t.size(), t.stride(), t.storage_offset())
else:
is_leaf = safe_is_leaf(t)
# Fake up some autograd history.
if t.requires_grad:
r = torch.empty(
(0,), dtype=t.dtype, device="meta", requires_grad=True
)
if not is_leaf:
with torch.enable_grad():
# The backward function here will be wrong, but
# that's OK; our goal is just to get the metadata
# looking as close as possible; we're not going to
# actually try to backward() on these produced
# metas. TODO: would be safer to install some
# sort of unsupported grad_fn here
r = r.clone()
else:
r = torch.empty((0,), dtype=t.dtype, device="meta")
# As long as meta storage is not supported, need to prevent
# redispatching on set_(Storage, ...) which will choke with
# meta storage
s = self.meta_storage(t.storage())
with no_dispatch():
with torch.no_grad():
r.set_(s, t.storage_offset(), t.size(), t.stride())
torch._C._set_conj(r, t.is_conj())
torch._C._set_neg(r, t.is_neg())
self.set_tensor_memo(t, r)
return self.get_tensor_memo(t)
def __call__(self, t):
# TODO: zero tensors? We appear to have eliminated them by
# excluding complex for now
if type(t) is torch.Tensor or type(t) is torch.nn.Parameter:
if any(
[
t.is_sparse_csr,
t.layout in [torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc],
t.is_mkldnn,
t.is_quantized,
t.is_nested,
t._is_view() and t._base is not None and t._base.is_sparse,
torch._is_functional_tensor(t),
# these are supported in meta conversion but the fallbacks
# don't work
t.is_neg(),
t.is_conj(),
t.device.type in ("lazy", "meta"),
# We need a way to test if a tensor is batched but there
# is no official API to do it
# torch._C._is_batched(t),
]
):
# TODO: sparse should support meta
# NB technically to('meta') does work but our logging
# instrumentation will see the meta conversions and the
# tests all break so we just exclude this. In any case
# the to conversion isn't really right anyhow.
self.miss += 1
return t
else:
self.hit += 1
r = self.meta_tensor(t)
if type(t) is torch.nn.Parameter:
r = torch.nn.Parameter(r, requires_grad=r.requires_grad)
return r
elif torch.overrides.is_tensor_like(t):
# Blindly converting tensor subclasses to meta can cause
# unpredictable problems; e.g., FX tests will trace meta
# tensors into their trace / some subclasses don't correctly
# support meta. Trying to YOLO this is more trouble than it's
# worth.
self.miss += 1
return t
else:
# non-Tensor types don't count as hit or miss
return t
import torch._prims_common as utils
|
pytorch-master
|
torch/_subclasses/meta_utils.py
|
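# Hedged sketch: converting a real tensor and one of its views to meta tensors
# with the MetaConverter above; the view structure is recreated on top of the
# meta-fied base.
import torch
from torch._subclasses.meta_utils import MetaConverter

converter = MetaConverter()
base = torch.randn(4, 4)
view = base[1:]
meta_base = converter(base)
meta_view = converter(view)
assert meta_base.device.type == "meta" and meta_view.device.type == "meta"
assert meta_view._base is not None          # still a view after conversion
assert converter.successful()               # every input was convertible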
import torch
from torch._subclasses.fake_tensor import (
DynamicOutputShapeException,
FakeTensor,
FakeTensorMode,
UnsupportedFakeTensorException,
)
__all__ = [
"FakeTensor",
"FakeTensorMode",
"UnsupportedFakeTensorException",
"DynamicOutputShapeException",
]
|
pytorch-master
|
torch/_subclasses/__init__.py
|
import contextlib
import functools
import itertools
import weakref
from dataclasses import dataclass
from functools import partial
from typing import Callable, Union
import torch
import torch.fx.experimental.symbolic_shapes as symbolic_shapes
from torch._ops import OpOverload
from torch._subclasses.meta_utils import MetaConverter, WeakTensorRefKey
from torch.fx.operator_schemas import normalize_function
from torch.overrides import TorchFunctionMode
from torch.utils._mode_utils import no_dispatch
from torch.utils._python_dispatch import enable_torch_dispatch_mode, TorchDispatchMode
from torch.utils._pytree import tree_flatten, tree_map
aten = torch.ops.aten
@dataclass
class UnsupportedFakeTensorException(RuntimeError):
reason: str
@dataclass
class DynamicOutputShapeException(RuntimeError):
func: OpOverload
_device_not_kwarg_ops = (
aten._resize_output_.default,
aten.nested_tensor.default,
aten.nested_tensor.out,
aten.pin_memory.default,
aten.is_pinned.default,
aten.to.device,
aten.to.prim_Device,
aten._pin_memory.default,
aten._pin_memory.out,
aten._resize_output.default,
aten._resize_output.out,
)
# this op is never actually used
_non_kwarg_device_constructors = (aten._list_to_tensor,)
def contains_tensor_types(type):
tensor_type = torch._C.TensorType.get()
return type.isSubtypeOf(tensor_type) or any(
contains_tensor_types(e) for e in type.containedTypes()
)
_like_tensor_constructors = (
aten.empty_like.default,
aten.empty_like.out,
aten.full_like.default,
aten.full_like.out,
aten.ones_like.default,
aten.ones_like.out,
aten.rand_like.default,
aten.rand_like.out,
aten.randn_like.default,
aten.randn_like.out,
aten.randint_like.default,
aten.randint_like.out,
aten.randint_like.low_dtype,
aten.randint_like.low_dtype_out,
aten.zeros_like.default,
aten.zeros_like.out,
aten.new_empty.default,
aten.new_empty.out,
aten.new_empty_strided.default,
aten.new_empty_strided.out,
aten.new_full.default,
aten.new_full.out,
aten.new_zeros.default,
aten.new_zeros.out,
aten.new_ones.default,
aten.new_ones.out,
)
@functools.lru_cache(None)
def _is_tensor_constructor(func: OpOverload):
assert isinstance(func, OpOverload)
schema = func._schema
if any(contains_tensor_types(arg.type) for arg in schema.arguments):
return False
# TODO: no real reason to restrict multiple outputs
return (
len(schema.returns) == 1 and schema.returns[0].type is torch._C.TensorType.get()
)
# Similar to `MetaConverter`, this is a class for converting
# multiple tensors into fake tensors which share the same view/storage
# structure. Like `MetaConverter`, it uses `WeakTensorRefKey` to
# hold a weak reference for all memoized tensors.
class FakeTensorConverter(object):
tensor_memo: weakref.WeakValueDictionary
meta_converter: MetaConverter
def __init__(self):
# FakeTensors store the FakeTensorMode which in turn stores a
# FakeTensor, so we need to hold a weak reference to the FakeTensor
# otherwise we would induce a circular reference
self.tensor_memo = weakref.WeakValueDictionary()
self.meta_converter = MetaConverter()
def _get_memo(self, t):
if WeakTensorRefKey(t) in self.tensor_memo:
out = self.tensor_memo[WeakTensorRefKey(t)]
out._fix_weakref()
return out
return None
def set_tensor_memo(self, t, v):
th = WeakTensorRefKey(t)
# hold a weak ref to self, otherwise it will be kept alive
# by the del_ten closure
self_weak_ref = weakref.ref(self)
def del_ten():
self_ref = self_weak_ref()
if self_ref is None:
return
# on shutdown, th may not be in memo
self_ref.tensor_memo.pop(th, None)
weakref.finalize(t, del_ten)
self.tensor_memo[th] = v
def from_real_tensor(self, fake_mode, t):
maybe_memo = self._get_memo(t)
if maybe_memo is not None:
return maybe_memo
existing_device = t.device
# not yet supported in metatensors
if t.is_quantized:
raise UnsupportedFakeTensorException("quantized nyi in meta tensors")
with no_dispatch():
meta_t = self.meta_converter(t)
if meta_t.device.type != "meta":
raise UnsupportedFakeTensorException("meta converter nyi")
out = FakeTensor(fake_mode, meta_t, existing_device)
if type(t) is torch.nn.Parameter:
out = torch.nn.Parameter(out, requires_grad=out.requires_grad) # type: ignore[assignment]
if t.grad is not None:
out.grad = self.from_real_tensor(fake_mode, t.grad)
self.set_tensor_memo(t, out)
return out
def from_meta_and_device(self, fake_mode, t, device):
maybe_memo = self._get_memo(t)
if maybe_memo is not None:
return maybe_memo
out = FakeTensor(fake_mode, t, device)
self.set_tensor_memo(t, out)
return out
# There are two ways to call this. First, you can have manually constructed
# a meta tensor and you need to turn it into a fake tensor. In that case,
# pass a meta tensor and a device argument. Alternately, you can have a
# real tensor that you need to convert into a fake tensor; in that case,
# omit the device.
#
# The disallowed case: if you specify the device, it MUST be a meta tensor.
# However, you're allowed to pass a meta tensor to be turned into a fake
# tensor; although an odd thing to do, this can occur if you're doing
# cross ref testing and the inner test is already operating on meta tensors
def __call__(self, fake_mode, t, device=None):
if device is None:
return self.from_real_tensor(fake_mode, t)
else:
assert t.device.type == "meta"
return self.from_meta_and_device(fake_mode, t, device)
op_implementations = []
def register_op_impl(run_impl_check: Union[Callable[[OpOverload], bool], OpOverload]):
def impl_decorator(op_impl):
global op_implementations
if isinstance(run_impl_check, OpOverload):
op_implementations.append((lambda func: func == run_impl_check, op_impl))
else:
op_implementations.append((run_impl_check, op_impl))
return op_impl
return impl_decorator
@register_op_impl(
lambda func: (_is_tensor_constructor(func) or func in _like_tensor_constructors)
)
def constructors(fake_mode, func, *args, **kwargs):
assert func not in _non_kwarg_device_constructors
_, new_kwargs = normalize_function(
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
if func in _like_tensor_constructors:
default_device = new_kwargs["input"].device
# TODO: file issue
args = (new_kwargs.pop("input"),)
else:
# cpu is default device if none is specified
default_device = torch.device("cpu")
args = ()
out_device = new_kwargs.pop("device", None)
out_device = out_device if out_device is not None else default_device
new_kwargs["device"] = torch.device("meta")
r = func(*args, **new_kwargs)
return FakeTensor(fake_mode, r, out_device)
@register_op_impl(lambda func: func in (aten.to.prim_Device, aten.to.device))
def non_kwarg_to(fake_mode, func, *args, **kwargs):
_, new_kwargs = normalize_function(
func, args, kwargs, normalize_to_only_use_kwargs=True
)
input_device = new_kwargs["device"]
out_device = input_device if input_device else new_kwargs["input"].device
new_kwargs["device"] = torch.device("meta")
r = func(*args, **new_kwargs)
return fake_mode.fake_tensor_converter(fake_mode, r, out_device)
# Don't apply the default device handling,
# since the device of `the_template` is ignored
@register_op_impl(aten.resize_as_.default)
def resize_as_(fake_mode, func, *args, **kwargs):
return func(*args, **kwargs)
@register_op_impl(aten._sparse_coo_tensor_with_dims_and_tensors.default)
def _sparse_coo_tensor_with_dims_and_tensors(fake_mode, func, *args, **kwargs):
# TODO: remove me
return constructors(fake_mode, func, *args, **kwargs)
# _to_copy fails when run with FakeTensors to cuda device
# TODO: debug
@register_op_impl(aten._to_copy.default)
def to_copy(fake_mode, func, *args, **kwargs):
_, new_kwargs = normalize_function(
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
input_device = new_kwargs.pop("device", None)
out_device = input_device if input_device else new_kwargs["input"].device
with no_dispatch():
input = new_kwargs.pop("input").to("meta")
return FakeTensor(fake_mode, aten._to_copy(input, **new_kwargs), out_device)
@register_op_impl(aten.clone.default)
def clone(fake_mode, func, input, memory_format=None):
out_device = input.device
with no_dispatch():
out = aten._to_copy(input.to("meta"), memory_format=memory_format)
return FakeTensor(fake_mode, out, out_device)
# index.Tensor data-dependent in only some conditions
@register_op_impl(
lambda func: torch.Tag.dynamic_output_shape in func.tags # type: ignore[attr-defined]
and func != aten.index.Tensor
)
def data_dep_op(fake_mode, func, *args, **kwargs):
raise DynamicOutputShapeException(func)
# Bool Indices get Expanded as Masks
# See: IndexingUtils.h:expandTensors
def check_no_bool_index_tensors(func, self, indices):
for index in indices:
if index is not None and index.dtype in (torch.bool, torch.uint8):
raise DynamicOutputShapeException(func)
def run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs):
_, new_kwargs = normalize_function(
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
out_device = new_kwargs["input"].device
with in_kernel_invocation_manager(fake_mode):
out = func(*args, **kwargs)
return FakeTensor(fake_mode, out, out_device)
# Don't apply the default device handling,
# since the op can take non-zero sized cpu
# index tensors with a cuda self
@register_op_impl(aten.index.Tensor)
def index_tensor(fake_mode, func, *args, **kwargs):
# dynamic shape op if indices are bool/uint8
check_no_bool_index_tensors(func, *args, **kwargs)
return run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs)
# takes in multiple devices, don't apply the default device handling
@register_op_impl(aten.index_put.default)
def index_put(fake_mode, func, *args, **kwargs):
return run_and_return_new_tensor_of_input_device(fake_mode, func, args, kwargs)
# same as index_put, but returns the input
@register_op_impl(aten.index_put_.default)
def index_put_(fake_mode, func, *args, **kwargs):
with in_kernel_invocation_manager(fake_mode):
out = func(*args, **kwargs)
_, new_kwargs = normalize_function(
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
return new_kwargs["input"]
# Meta tensors give you the ability to run PyTorch code without having to
# actually do computation through tensors allocated on a `meta` device.
# Because the device is `meta`, meta tensors do not model device propagation.
# FakeTensor extends MetaTensors to also carry an additional `fake_device`
# which tracks devices that would have been used.
@contextlib.contextmanager
def in_kernel_invocation_manager(fake_mode):
fake_mode.in_kernel_invocation = True
# See: note [Fake Tensor Dispatch Keys]
torch._C._add_meta_to_tls_dispatch_include()
try:
yield
finally:
fake_mode.in_kernel_invocation = False
torch._C._remove_meta_from_tls_dispatch_include()
class FakeTensor(torch.Tensor):
fake_device: torch.device
fake_mode: "FakeTensorMode"
has_sym_ints: bool
# Note: [Fake Tensor Dispatch Keys]
# In order to model the behavior of device-specific autocast
# and autograd logic, we update the dispatch keys of FakeTensors
# to reflect their fake device. This includes the BackendComponent
# (DispatchKey::Meta -> DispatchKey::CUDA), and also the BackendComponent
# related Autocast and Autograd keys. __torch__dispatch__ sits below
# Autocast and Autograd, and is only invoked when we are at the
# kernel for the BackendComponent. Then, we add Meta to the
# thread-local dispatch include set to hit the meta kernel
# instead of the kernel of the BackendComponent for the fake device.
# The `device_for_backend_keys` does that below
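# For example (illustrative only): because a FakeTensor faking a cuda device
# carries the CUDA backend keys, device-specific logic such as autocast applies
# to it as it would to a real cuda tensor:
#
#   with torch.autocast("cuda"):
#       out = fake_a @ fake_b      # fake_a, fake_b: FakeTensors with a cuda fake_device
#       out.dtype                  # torch.float16, as with real cuda tensors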
@staticmethod
def __new__(cls, fake_mode, elem, device):
return torch.Tensor._make_subclass(
cls,
elem,
elem.requires_grad,
dispatch_device=True,
device_for_backend_keys=device,
)
def __init__(self, fake_mode, elem, device: Union[torch.device, str]):
assert elem.device.type == "meta", elem.device.type
device = device if isinstance(device, torch.device) else torch.device(device)
# NB: it is fine, if a little confusing, for device to be meta
# (we are faking a meta tensor in that case). However, it often
# indicates some sort of confusion (e.g., you accidentally passed
# in a meta tensor when you should have passed in the real tensor).
# So by default we disallow meta, and if you are working in a situation
# where it is helpful (e.g., crossref testing) you can turn it back
# on
if not fake_mode.allow_meta:
assert device.type != "meta"
# normalize cuda device.
if device.type == "cuda" and device.index is None:
device = torch.device(f"cuda:{torch.cuda.current_device()}")
self.fake_device = device
self.fake_mode = fake_mode
self.has_sym_ints = symbolic_shapes.has_symbolic_sizes_strides(elem)
@staticmethod
def from_tensor(t, fake_mode):
existing_device = t.device
# TODO: this should use meta converter
return FakeTensor(fake_mode, t.to(device="meta"), existing_device)
# TODO: resolve error in default __repr__
def __repr__(self):
with in_kernel_invocation_manager(self.fake_mode):
self_repr = super().__repr__()
return f"FakeTensor({self.fake_mode}, {self_repr}, {self.fake_device})"
def stride(self):
if self.has_sym_ints:
# TODO: As we currently don't support symbolic strides, we'll assume contiguous strides
# The reason this needs to be here instead of __torch_dispatch__ is that
# when aten.stride goes into __torch_dispatch__, it expects a list of
# concrete ints to be returned. So we need to short-circuit that entirely
return symbolic_shapes.create_contiguous(self.shape)
return super().stride()
def new(self, *args, **kwargs):
# torch.Tensor.new does not go through the normal dispatcher pattern,
# so in order to use the same pattern as a normal invocation of
# returning the meta device within the kernel, we need to intercept
# the call here.
# Because it doesn't go through the dispatcher, we run into errors
# when attempting to compute an output in meta, so
# we compute the real tensor and then convert it to meta
out_device = self.fake_device
with no_dispatch():
real_out = super().new(*args, **kwargs)
assert not isinstance(real_out, FakeTensor), real_out
assert real_out.device.type != "meta", real_out.device
with no_dispatch():
meta_out = MetaConverter()(real_out)
return FakeTensor(self.fake_mode, meta_out, out_device)
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
# need to handle here to avoid infinite recursion
# see [in_kernel_invocation]
if func == torch.ops.prim.device.default:
assert len(args) == 1 and isinstance(args[0], FakeTensor)
if args[0].fake_mode.in_kernel_invocation:
return torch.device("meta")
else:
return args[0].fake_device
# Need this to handle infinite recursion with sparse tensors.
# Sparse tensors have custom stride policy which means that
# they will dispatch here on dispatch, and we need to trigger
# the default behavior.
# TODO: when we get other tensor types online they will also
# need to get entries here.
elif func == torch.ops.aten.stride.default:
return None
# Because fake mode can return NotImplemented (if it sees a subclass
# it doesn't know how to deal with), this test here is important
# because the next dispatch after a fake mode will attempt to use
# subclasses of tensors to dispatch, and any FakeTensor arguments
# will be considered eligible.
if any(not issubclass(t, FakeTensor) and t is not torch.Tensor for t in types):
return NotImplemented
fake_mode = None
for arg in itertools.chain(tree_flatten(args)[0], tree_flatten(kwargs)[0]):
if isinstance(arg, FakeTensor):
if fake_mode is None:
fake_mode = arg.fake_mode
else:
assert fake_mode is arg.fake_mode, "Mixing modes NYI"
with enable_torch_dispatch_mode(fake_mode):
return func(*args, **kwargs)
@staticmethod
def _find_common_device(func, args, kwargs):
# cpu zero-dim tensors can be used in cuda kernels,
# so overwrite the common_device if the only existing
# device comes from a cpu zero-dim tensor
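# For example (illustrative): aten.add(cuda_tensor, cpu_zero_dim_tensor)
# resolves to the cuda device, because the zero-dim cpu tensor defers to it.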
common_device = None
is_cpu_zero_dim = None
def cpu_zero_dim(t):
return t.device.type == "cpu" and t.dim() == 0
def merge_devices(t):
nonlocal common_device
nonlocal is_cpu_zero_dim
if not isinstance(t, FakeTensor):
return
if common_device is None:
common_device = t.device
is_cpu_zero_dim = cpu_zero_dim(t)
return
t_is_cpu_zero_dim = cpu_zero_dim(t)
if t.device == common_device:
if is_cpu_zero_dim:
is_cpu_zero_dim = t_is_cpu_zero_dim
return
# mismatching devices !
# if current tensor is cpu 0 dim, defer to existing device
if t_is_cpu_zero_dim:
return
# current device is from cpu 0 dim tensor, overwrite
if is_cpu_zero_dim:
common_device = t.device
is_cpu_zero_dim = t_is_cpu_zero_dim
return
# mismatching devices of non-zero dim tensors, throw
# This might be valid behavior and may need to be explicitly modeled, e.g. reshape_as
raise RuntimeError(
f"Unhandled FakeTensor Device Propagation for {func}, found two different devices {common_device}, {t.device}"
)
tree_map(merge_devices, args)
tree_map(merge_devices, kwargs)
assert common_device is not None, f"Could not find common device for {func}"
return common_device
__torch_function__ = torch._C._disabled_torch_function_impl
# We keep one instantiation of `fake_tensor_converter` active
# for the duration of `with enable_torch_dispatch_mode(FakeTensorMode())`.
# This allows accurate storage aliasing across invocations of
# different operators. While this will keep all freshly allocated
# tensors alive during `FakeTensorMode`, there will be no
# new allocations of Tensors which have non-meta storage so
# memory should not significantly increase.
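# A minimal usage sketch (illustrative only, using the enable_torch_dispatch_mode
# helper used elsewhere in this file):
#
#   mode = FakeTensorMode()
#   x = mode.from_tensor(torch.randn(4))      # FakeTensor with a cpu fake_device
#   with enable_torch_dispatch_mode(mode):
#       y = x * 2                             # runs through meta kernels, no real compute
#   assert isinstance(y, FakeTensor) and y.device == x.device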
class FakeTensorMode(TorchDispatchMode):
def __init__(self, *, allow_fallback_kernels=True, allow_meta=False):
self.allow_fallback_kernels = allow_fallback_kernels
self.fake_tensor_converter = FakeTensorConverter()
self.allow_meta = allow_meta
# [in_kernel_invocation]
# when FakeTensor is invoked in user code, .device should return
# the fake_device of the tensor so that code such as `if x.is_cuda`
# or torch.zeros([10, 10], device=x.device) continues to execute as if
# the FakeTensor were real. However, within kernel execution, we return
# the `Meta` device because all computation within the kernels should
# behave as if the Tensors are on meta devices. Kernels should allocate
# new tensors on meta devices, and checks like `is_meta` should return true.
# within python refs, we always return the real device by defining
# the device property
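# For example (illustrative), for a FakeTensor faking a cuda tensor:
#   outside kernel invocation:  x.device -> device('cuda:0'),  x.is_cuda -> True
#   inside kernel invocation:   x.device -> device('meta')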
self.in_kernel_invocation = False
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs if kwargs else {}
if func == torch.ops.prim.device.default:
assert len(args) == 1 and isinstance(args[0], FakeTensor)
if args[0].fake_mode.in_kernel_invocation:
return torch.device("meta")
else:
return args[0].fake_device
flat_arg_tensors = [
i for i in tree_flatten((args, kwargs))[0] if isinstance(i, FakeTensor)
]
has_symbolic_sizes = any([i.has_sym_ints for i in flat_arg_tensors])
if has_symbolic_sizes:
# TODO: Find better approach for this
# Avoid circular import
from torch._decomp import decomposition_table
from torch._meta_registrations import meta_table
# TODO: hack, doesn't actually work.
# see https://github.com/pytorch/pytorch/pull/81598#issuecomment-1192030435
with enable_torch_dispatch_mode(
self
), torch.overrides.enable_reentrant_dispatch():
if func in meta_table:
r = meta_table[func](*args, **kwargs)
return r
if func in decomposition_table:
return decomposition_table[func](*args, **kwargs)
with no_dispatch():
if symbolic_shapes.is_symbolic_op(func):
return symbolic_shapes.handle_symbolic_op(func, args, kwargs)
if func == aten.size.default:
raise RuntimeError(
"Trying to call aten.size on a tensor with symbolic shapes. "
"It's likely that this is from calling tensor.shape in C++"
)
# prims already wrap FakeTensor inputs to FakeTensor outputs
# and do device logic; we don't need to do anything but run them
# and ensure that Meta kernels are dispatched to
# (see Note [Fake Tensor Dispatch Keys])
if "prims::" in func._schema.name and len(flat_arg_tensors) != 0:
try:
torch._C._add_meta_to_tls_dispatch_include()
with no_dispatch():
return func(*args, **kwargs)
finally:
torch._C._remove_meta_from_tls_dispatch_include()
if has_symbolic_sizes:
constructors = [aten.empty.SymInt]
if func not in constructors:
raise RuntimeError(
f"{func} - couldn't find symbolic meta function/decomposition"
)
with no_dispatch():
# TODO: apply as no_dispatch decorator
converter = self.fake_tensor_converter
# if we are in the dispatch mode, we will enter this function even if the inputs
# are not FakeTensors. For now, throw if there are any non-FakeTensor inputs
# and just support constructors. TODO: extend more broadly
conversion_made = False
subclass_seen = False
def check_non_fake_tensor(x):
nonlocal conversion_made, subclass_seen
conversion_made = conversion_made or (
isinstance(x, torch.Tensor) and not isinstance(x, FakeTensor)
)
subclass_seen = subclass_seen or (
isinstance(x, torch.Tensor)
and not isinstance(x, FakeTensor)
and type(x) is not torch.Tensor
and type(x) is not torch.nn.Parameter
)
tree_map(check_non_fake_tensor, args)
tree_map(check_non_fake_tensor, kwargs)
# Suppose we enable fake tensor mode. This means that fake tensor
# mode will run first. But what if we do an operation that
# involves a tensor subclass that will desugar into normal tensor
# operations? Without this line, fake tensor mode will run first,
# decide that a conversion was made (since there was a non fake
# tensor argument), and report an error that converting non
# fake tensor is not supported. What we actually wanted to happen
# was to give the subclass a chance to figure out what it wants to
# before erroring out. Returning NotImplemented here allows this.
#
# NB: If you're seeing a mysterious infinite loop involving fake
# tensor, it might be related to this line. Though I'm not sure
# how you'll know to read this comment, as this line won't show up
# in the stack trace.
if subclass_seen:
return NotImplemented
# this is generated from torch.tensor(), which does not use the
# dispatcher, to allow wrapper subclasses to wrap the new tensor;
# we need to handle it before error checking
if func in [
aten.lift_fresh.default,
aten.lift_fresh_copy.default,
]:
assert (
len(kwargs) == 0
and len(args) == 1
and type(args[0]) is torch.Tensor
), f"{args} {kwargs}"
with no_dispatch():
return converter(self, args[0])
if conversion_made:
raise Exception(
"Invoking operators with non-Fake Tensor inputs in FakeTensorMode is not yet supported. "
f"Please convert all Tensors to FakeTensors first. Found in {func}(*{args}, **{kwargs})"
)
for run_impl_check, op_impl in op_implementations:
if run_impl_check(func):
return op_impl(self, func, *args, **kwargs)
try:
with in_kernel_invocation_manager(self):
r = func(*args, **kwargs)
except NotImplementedError as not_implemented_error:
if not self.allow_fallback_kernels:
raise not_implemented_error
return run_fallback_kernel(
self, func, args, kwargs, not_implemented_error
)
# TODO: handle non-kwarg devices
assert func not in _device_not_kwarg_ops, f"NYI: {func}"
# Lazily initialized, in case there are no tensor returns
common_device = None
def wrap(e, device=None):
nonlocal common_device
if isinstance(e, torch.Tensor) and not isinstance(e, FakeTensor):
if common_device is None:
common_device = FakeTensor._find_common_device(
func, args, kwargs
)
return converter(self, e, device or common_device)
else:
return e
# if device is specified, use that
if kwargs.get("device", None):
return tree_map(partial(wrap, device=kwargs["device"]), r)
return tree_map(partial(wrap), r)
def from_tensor(self, tensor):
return self.fake_tensor_converter(self, tensor)
# NB: returns fake tensors
def run_fallback_kernel(fake_mode, func, args, kwargs, orig_not_implemented_exception):
# these should all be supported; just to be safe,
# avoid the fallback for operators which in-place modify metadata,
# because the input fake tensors would be left unmodified
if torch.Tag.inplace_view in func.tags: # type: ignore[attr-defined]
raise orig_not_implemented_exception
with no_dispatch():
inp_impls = {}
def to_real_tensor(e):
if isinstance(e, FakeTensor):
out = torch.zeros_like(e, device=e.fake_device)
if e.is_sparse:
out._coalesced_(e.is_coalesced())
inp_impls[id(out)] = e
return out
return e
args = tree_map(to_real_tensor, args)
kwargs = tree_map(to_real_tensor, kwargs)
r = func(*args, **kwargs)
tensor_impls = set()
storages = set()
for e in tree_flatten((args, kwargs))[0]:
if isinstance(e, torch.Tensor):
if not e.is_sparse:
storages.add(e.storage()._cdata)
# TODO: also check metadata change on inputs
# proper aliasing/metadata relationship between outputs and inputs will
# not be set up, because of the conversion to a real device, unless we can reuse an
# input impl
for e in tree_flatten(r)[0]:
if id(e) not in inp_impls and (
isinstance(e, torch.Tensor)
and not e.is_sparse
and e.storage()._cdata in storages
):
raise orig_not_implemented_exception
def map_out(e):
if isinstance(e, torch.Tensor):
if id(e) in inp_impls:
return inp_impls[id(e)]
else:
return fake_mode.fake_tensor_converter(fake_mode, e)
else:
return e
return tree_map(map_out, r)
# Only intended for copying a module's tensors to fake tensors;
# does not apply elsewhere
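# A minimal usage sketch (illustrative only; assumes an existing FakeTensorMode
# instance `fake_mode` and a real nn.Module `mod`):
#
#   import copy
#   with FakeCopyMode(fake_mode):
#       fake_mod = copy.deepcopy(mod)   # parameters and buffers become FakeTensors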
class FakeCopyMode(TorchFunctionMode):
def __init__(self, fake_mode):
self.fake_mode = fake_mode
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs if kwargs else {}
# clone will get called in Parameter deepcopy
if func == torch._C._TensorBase.clone:
return func(self.fake_mode.from_tensor(args[0]), **kwargs)
elif func == torch.Tensor.__deepcopy__:
assert len(args) == 2 and len(kwargs) == 0
tensor, memo = args
if id(tensor) in memo:
return memo[id(tensor)]
out = self.fake_mode.from_tensor(tensor)
memo[id(tensor)] = out
return out
else:
with torch._C.DisableTorchFunction():
return func(*args, **kwargs)
| pytorch-master | torch/_subclasses/fake_tensor.py |
from __future__ import annotations
from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union
import torch
__all__ = ['Future', 'collect_all', 'wait_all']
T = TypeVar("T")
S = TypeVar("S")
class _PyFutureMeta(type(torch._C.Future), type(Generic)): # type: ignore[misc, no-redef]
pass
class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta):
r"""
Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous
execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. It
also exposes a set of APIs to add callback functions and set results.
.. warning:: GPU support is a beta feature, subject to changes.
"""
def __init__(self, *, devices: Optional[List[Union[int, str, torch.device]]] = None):
r"""
Create an empty unset ``Future``. If the future is intended to hold
values containing CUDA tensors, (a superset of) their CUDA devices must
be specified at construction. (This is only supported if
``torch.cuda.is_available()`` returns ``True``). This is needed to
ensure proper CUDA stream synchronization. The child futures, returned
by the ``then`` method, will inherit these devices.
Args:
devices(``List[Union[int, str, torch.device]]``, optional): the set
of devices on which tensors contained in this future's value are
allowed to reside and on which callbacks are allowed to operate.
"""
if devices is None:
devices = []
super().__init__([torch.device(d) for d in devices])
def done(self) -> bool:
r"""
Return ``True`` if this ``Future`` is done. A ``Future`` is done if it
has a result or an exception.
If the value contains tensors that reside on GPUs, ``Future.done()``
will return ``True`` even if the asynchronous kernels that are
populating those tensors haven't yet completed running on the device,
because at such stage the result is already usable, provided one
performs the appropriate synchronizations (see :meth:`wait`).
"""
return super().done()
def wait(self) -> T:
r"""
Block until the value of this ``Future`` is ready.
If the value contains tensors that reside on GPUs, then an additional
synchronization is performed with the kernels (executing on the device)
which may be asynchronously populating those tensors. Such sync is
non-blocking, which means that ``wait()`` will insert the necessary
instructions in the current streams to ensure that further operations
enqueued on those streams will be properly scheduled after the async
kernels but, once that is done, ``wait()`` will return, even if those
kernels are still running. No further synchronization is required when
accessing and using the values, as long as one doesn't change streams.
Returns:
The value held by this ``Future``. If the function (callback or RPC)
creating the value has thrown an error, this ``wait`` method will
also throw an error.
"""
return super().wait()
def value(self) -> T:
r"""
Obtain the value of an already-completed future.
This method should only be called after a call to :meth:`wait` has
completed, or inside a callback function passed to :meth:`then`. In
other cases this ``Future`` may not yet hold a value and calling
``value()`` could fail.
If the value contains tensors that reside on GPUs, then this method will
*not* perform any additional synchronization. This should be done
beforehand, separately, through a call to :meth:`wait` (except within
callbacks, for which it's already being taken care of by :meth:`then`).
Returns:
The value held by this ``Future``. If the function (callback or RPC)
creating the value has thrown an error, this ``value()`` method will
also throw an error.
"""
return super().value()
def then(self, callback: Callable[[Future[T]], S]) -> Future[S]:
r"""
Append the given callback function to this ``Future``, which will be run
when the ``Future`` is completed. Multiple callbacks can be added to
the same ``Future``, but the order in which they will be executed cannot
be guaranteed (to enforce a certain order consider chaining:
``fut.then(cb1).then(cb2)``). The callback must take one argument, which
is the reference to this ``Future``. The callback function can use the
:meth:`value` method to get the value. Note that if this ``Future`` is
already completed, the given callback will be run immediately inline.
If the ``Future``'s value contains tensors that reside on GPUs, the
callback might be invoked while the async kernels that are populating
those tensors haven't yet finished executing on the device. However, the
callback will be invoked with some dedicated streams set as current
(fetched from a global pool) which will be synchronized with those
kernels. Hence any operation performed by the callback on these tensors
will be scheduled on the device after the kernels complete. In other
words, as long as the callback doesn't switch streams, it can safely
manipulate the result without any additional synchronization. This is
similar to the non-blocking behavior of :meth:`wait`.
Similarly, if the callback returns a value that contains tensors that
reside on a GPU, it can do so even if the kernels that are producing
these tensors are still running on the device, as long as the callback
didn't change streams during its execution. If one wants to change
streams, one must be careful to re-synchronize them with the original
streams, that is, those that were current when the callback was invoked.
Args:
callback(``Callable``): a ``Callable`` that takes this ``Future`` as
the only argument.
Returns:
A new ``Future`` object that holds the return value of the
``callback`` and will be marked as completed when the given
``callback`` finishes.
.. note:: Note that if the callback function throws, either
through the original future being completed with an exception and
calling ``fut.wait()``, or through other code in the callback, the
future returned by ``then`` will be marked appropriately with the
encountered error. However, if this callback later completes
additional futures, those futures are not marked as completed with
an error and the user is responsible for handling completion/waiting
on those futures independently.
Example::
>>> def callback(fut):
... print(f"RPC return value is {fut.wait()}.")
>>> fut = torch.futures.Future()
>>> # The inserted callback will print the return value when
>>> # receiving the response from "worker1"
>>> cb_fut = fut.then(callback)
>>> chain_cb_fut = cb_fut.then(
... lambda x : print(f"Chained cb done. {x.wait()}")
... )
>>> fut.set_result(5)
RPC return value is 5.
Chained cb done. None
"""
return cast(Future[S], super().then(callback))
def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:
r"""
Append the given callback function to this ``Future``, which will be run
when the ``Future`` is completed. Multiple callbacks can be added to
the same ``Future``, but the order in which they will be executed cannot
be guaranteed. The callback must take one argument, which is the
reference to this ``Future``. The callback function can use the
:meth:`value` method to get the value. Note that if this ``Future`` is
already completed, the given callback will be run inline.
We recommend that you use the :meth:`then` method as it provides a way
to synchronize after your callback has completed. ``add_done_callback``
can be cheaper if your callback does not return anything. But both
:meth:`then` and ``add_done_callback`` use the same callback
registration API under the hood.
With respect to GPU tensors, this method behaves in the same way as
:meth:`then`.
Args:
callback(``Callable``): a ``Callable`` that takes in one argument,
which is the reference to this ``Future``.
.. note:: Note that if the callback function throws, either
through the original future being completed with an exception and
calling ``fut.wait()``, or through other code in the callback,
error handling must be carefully taken care of. For example, if
this callback later completes additional futures, those futures are
not marked as completed with an error and the user is responsible
for handling completion/waiting on those futures independently.
Example::
>>> def callback(fut):
... print(f"This will run after the future has finished.")
... print(fut.wait())
>>> fut = torch.futures.Future()
>>> fut.add_done_callback(callback)
>>> fut.set_result(5)
This will run after the future has finished.
5
"""
super().add_done_callback(callback)
def set_result(self, result: T) -> None:
r"""
Set the result for this ``Future``, which will mark this ``Future`` as
completed and trigger all attached callbacks. Note that a ``Future``
cannot be marked completed twice.
If the result contains tensors that reside on GPUs, this method can be
called even if the asynchronous kernels that are populating those
tensors haven't yet completed running on the device, provided that the
streams on which those kernels were enqueued are set as the current ones
when this method is called. Put simply, it's safe to call this method
immediately after launching those kernels, without any additional
synchronization, as long as one doesn't change streams in between. This
method will record events on all the relevant current streams and will
use them to ensure proper scheduling for all the consumers of this
``Future``.
Args:
result (object): the result object of this ``Future``.
Example::
>>> import threading
>>> import time
>>> def slow_set_future(fut, value):
... time.sleep(0.5)
... fut.set_result(value)
>>> fut = torch.futures.Future()
>>> t = threading.Thread(
... target=slow_set_future,
... args=(fut, torch.ones(2) * 3)
... )
>>> t.start()
>>> print(fut.wait())
tensor([3., 3.])
>>> t.join()
"""
super().set_result(result)
def set_exception(self, result: T) -> None:
r"""
Set an exception for this ``Future``, which will mark this ``Future`` as
completed with an error and trigger all attached callbacks. Note that
when calling wait()/value() on this ``Future``, the exception set here
will be raised inline.
Args:
result (BaseException): the exception for this ``Future``.
Example::
>>> fut = torch.futures.Future()
>>> fut.set_exception(ValueError("foo"))
>>> fut.wait()
Traceback (most recent call last):
...
ValueError: foo
"""
assert isinstance(result, Exception), f"{result} is of type {type(result)}, not an Exception."
def raise_error(fut_result):
raise fut_result
super()._set_unwrap_func(raise_error)
self.set_result(result) # type: ignore[arg-type]
def collect_all(futures: List[Future]) -> Future[List[Future]]:
r"""
Collects the provided :class:`~torch.futures.Future` objects into a single
combined :class:`~torch.futures.Future` that is completed when all of the
sub-futures are completed.
Args:
futures (list): a list of :class:`~torch.futures.Future` objects.
Returns:
Returns a :class:`~torch.futures.Future` object to a list of the passed
in Futures.
Example::
>>> fut0 = torch.futures.Future()
>>> fut1 = torch.futures.Future()
>>> fut = torch.futures.collect_all([fut0, fut1])
>>> fut0.set_result(0)
>>> fut1.set_result(1)
>>> fut_list = fut.wait()
>>> print(f"fut0 result = {fut_list[0].wait()}")
fut0 result = 0
>>> print(f"fut1 result = {fut_list[1].wait()}")
fut1 result = 1
"""
return cast(Future[List[Future]], torch._C._collect_all(cast(List[torch._C.Future], futures)))
def wait_all(futures: List[Future]) -> List:
r"""
Waits for all provided futures to be complete, and returns
the list of completed values. If any of the futures encounters an error,
the method will exit early and report the error not waiting for other
futures to complete.
Args:
futures (list): a list of :class:`~torch.futures.Future` object.
Returns:
A list of the completed :class:`~torch.futures.Future` results. This
method will throw an error if ``wait`` on any
:class:`~torch.futures.Future` throws.
"""
return [fut.wait() for fut in torch._C._collect_all(cast(List[torch._C.Future], futures)).wait()]
| pytorch-master | torch/futures/__init__.py |
# -*- coding: utf-8 -*-
import sys
import torch
from torch._C import _add_docstr, _linalg # type: ignore[attr-defined]
LinAlgError = torch._C._LinAlgError # type: ignore[attr-defined]
Tensor = torch.Tensor
common_notes = {
"experimental_warning": """This function is "experimental" and it may change in a future PyTorch release.""",
"sync_note": "When inputs are on a CUDA device, this function synchronizes that device with the CPU.",
"sync_note_ex": r"When the inputs are on a CUDA device, this function synchronizes only when :attr:`check_errors`\ `= True`.",
"sync_note_has_ex": ("When inputs are on a CUDA device, this function synchronizes that device with the CPU. "
"For a version of this function that does not synchronize, see :func:`{}`.")
}
# Note: This not only adds doc strings for functions in the linalg namespace, but
# also connects the torch.linalg Python namespace to the torch._C._linalg builtins.
cross = _add_docstr(_linalg.linalg_cross, r"""
linalg.cross(input, other, *, dim=-1, out=None) -> Tensor
Computes the cross product of two 3-dimensional vectors.
Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
of vectors, for which it computes the product along the dimension :attr:`dim`.
In this case, the output has the same batch dimensions as the inputs broadcast to
a common shape.
Args:
input (Tensor): the first input tensor.
other (Tensor): the second input tensor.
dim (int, optional): the dimension along which to take the cross-product. Default: `-1`.
Keyword args:
out (Tensor, optional): the output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: If after broadcasting :attr:`input`\ `.size(\ `:attr:`dim`\ `) != 3`
or :attr:`other`\ `.size(\ `:attr:`dim`\ `) != 3`.
Example:
>>> a = torch.randn(4, 3)
>>> a
tensor([[-0.3956, 1.1455, 1.6895],
[-0.5849, 1.3672, 0.3599],
[-1.1626, 0.7180, -0.0521],
[-0.1339, 0.9902, -2.0225]])
>>> b = torch.randn(4, 3)
>>> b
tensor([[-0.0257, -1.4725, -1.2251],
[-1.1479, -0.7005, -1.9757],
[-1.3904, 0.3726, -1.1836],
[-0.9688, -0.7153, 0.2159]])
>>> torch.linalg.cross(a, b)
tensor([[ 1.0844, -0.5281, 0.6120],
[-2.4490, -1.5687, 1.9792],
[-0.8304, -1.3037, 0.5650],
[-1.2329, 1.9883, 1.0551]])
>>> a = torch.randn(1, 3) # a is broadcast to match shape of b
>>> a
tensor([[-0.9941, -0.5132, 0.5681]])
>>> torch.linalg.cross(a, b)
tensor([[ 1.4653, -1.2325, 1.4507],
[ 1.4119, -2.6163, 0.1073],
[ 0.3957, -1.9666, -1.0840],
[ 0.2956, -0.3357, 0.2139]])
""")
cholesky = _add_docstr(_linalg.linalg_cholesky, r"""
linalg.cholesky(A, *, upper=False, out=None) -> Tensor
Computes the Cholesky decomposition of a complex Hermitian or real symmetric positive-definite matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **Cholesky decomposition** of a complex Hermitian or real symmetric positive-definite matrix
:math:`A \in \mathbb{K}^{n \times n}` is defined as
.. math::
A = LL^{\text{H}}\mathrlap{\qquad L \in \mathbb{K}^{n \times n}}
where :math:`L` is a lower triangular matrix with real positive diagonal (even in the complex case) and
:math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, and the transpose when :math:`L` is real-valued.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
""" + fr"""
.. note:: {common_notes["sync_note"]}
""" + r"""
.. seealso::
:func:`torch.linalg.cholesky_ex` for a version of this operation that
skips the (slow) error checking by default and instead returns the debug
information. This makes it a faster way to check if a matrix is
positive-definite.
:func:`torch.linalg.eigh` for a different decomposition of a Hermitian matrix.
The eigenvalue decomposition gives more information about the matrix but is
slower to compute than the Cholesky decomposition.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of symmetric or Hermitian positive-definite matrices.
Keyword args:
upper (bool, optional): whether to return an upper triangular matrix.
The tensor returned with upper=True is the conjugate transpose of the tensor
returned with upper=False.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if the :attr:`A` matrix or any matrix in a batched :attr:`A` is not Hermitian
(resp. symmetric) positive-definite. If :attr:`A` is a batch of matrices,
the error message will include the batch index of the first matrix that fails
to meet this condition.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> A = A @ A.T.conj() + torch.eye(2) # creates a Hermitian positive-definite matrix
>>> A
tensor([[2.5266+0.0000j, 1.9586-2.0626j],
[1.9586+2.0626j, 9.4160+0.0000j]], dtype=torch.complex128)
>>> L = torch.linalg.cholesky(A)
>>> L
tensor([[1.5895+0.0000j, 0.0000+0.0000j],
[1.2322+1.2976j, 2.4928+0.0000j]], dtype=torch.complex128)
>>> torch.dist(L @ L.T.conj(), A)
tensor(4.4692e-16, dtype=torch.float64)
>>> A = torch.randn(3, 2, 2, dtype=torch.float64)
>>> A = A @ A.mT + torch.eye(2) # batch of symmetric positive-definite matrices
>>> L = torch.linalg.cholesky(A)
>>> torch.dist(L @ L.mT, A)
tensor(5.8747e-16, dtype=torch.float64)
""")
cholesky_ex = _add_docstr(_linalg.linalg_cholesky_ex, r"""
linalg.cholesky_ex(A, *, upper=False, check_errors=False, out=None) -> (Tensor, Tensor)
Computes the Cholesky decomposition of a complex Hermitian or real
symmetric positive-definite matrix.
This function skips the (slow) error checking and error message construction
of :func:`torch.linalg.cholesky`, instead directly returning the LAPACK
error codes as part of a named tuple ``(L, info)``. This makes this function
a faster way to check if a matrix is positive-definite, and it provides an
opportunity to handle decomposition errors more gracefully or performantly
than :func:`torch.linalg.cholesky` does.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
If :attr:`A` is not a Hermitian positive-definite matrix, or if it's a batch of matrices
and one or more of them is not a Hermitian positive-definite matrix,
then ``info`` stores a positive integer for the corresponding matrix.
The positive integer indicates the order of the leading minor that is not positive-definite,
and the decomposition could not be completed.
``info`` filled with zeros indicates that the decomposition was successful.
If ``check_errors=True`` and ``info`` contains positive integers, then a RuntimeError is thrown.
""" + fr"""
.. note:: {common_notes["sync_note_ex"]}
.. warning:: {common_notes["experimental_warning"]}
""" + r"""
.. seealso::
:func:`torch.linalg.cholesky` is a NumPy compatible variant that always checks for errors.
Args:
A (Tensor): the Hermitian `n \times n` matrix or the batch of such matrices of size
`(*, n, n)` where `*` is one or more batch dimensions.
Keyword args:
upper (bool, optional): whether to return an upper triangular matrix.
The tensor returned with upper=True is the conjugate transpose of the tensor
returned with upper=False.
check_errors (bool, optional): controls whether to check the content of ``infos``. Default: `False`.
out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> A = A @ A.t().conj() # creates a Hermitian positive-definite matrix
>>> L, info = torch.linalg.cholesky_ex(A)
>>> A
tensor([[ 2.3792+0.0000j, -0.9023+0.9831j],
[-0.9023-0.9831j, 0.8757+0.0000j]], dtype=torch.complex128)
>>> L
tensor([[ 1.5425+0.0000j, 0.0000+0.0000j],
[-0.5850-0.6374j, 0.3567+0.0000j]], dtype=torch.complex128)
>>> info
tensor(0, dtype=torch.int32)
""")
inv = _add_docstr(_linalg.linalg_inv, r"""
linalg.inv(A, *, out=None) -> Tensor
Computes the inverse of a square matrix if it exists.
Throws a `RuntimeError` if the matrix is not invertible.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
for a matrix :math:`A \in \mathbb{K}^{n \times n}`,
its **inverse matrix** :math:`A^{-1} \in \mathbb{K}^{n \times n}` (if it exists) is defined as
.. math::
A^{-1}A = AA^{-1} = \mathrm{I}_n
where :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
The inverse matrix exists if and only if :math:`A` is `invertible`_. In this case,
the inverse is unique.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices
then the output has the same batch dimensions.
""" + fr"""
.. note:: {common_notes["sync_note"]}
""" + r"""
.. note::
Consider using :func:`torch.linalg.solve` if possible for multiplying a matrix on the left by
the inverse, as::
linalg.solve(A, B) == linalg.inv(A) @ B # When B is a matrix
It is always preferred to use :func:`~solve` when possible, as it is faster and more
numerically stable than computing the inverse explicitly.
.. seealso::
:func:`torch.linalg.pinv` computes the pseudoinverse (Moore-Penrose inverse) of matrices
of any shape.
:func:`torch.linalg.solve` computes :attr:`A`\ `.inv() @ \ `:attr:`B` with a
numerically stable algorithm.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of invertible matrices.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if the matrix :attr:`A` or any matrix in the batch of matrices :attr:`A` is not invertible.
Examples::
>>> A = torch.randn(4, 4)
>>> Ainv = torch.linalg.inv(A)
>>> torch.dist(A @ Ainv, torch.eye(4))
tensor(1.1921e-07)
>>> A = torch.randn(2, 3, 4, 4) # Batch of matrices
>>> Ainv = torch.linalg.inv(A)
>>> torch.dist(A @ Ainv, torch.eye(4))
tensor(1.9073e-06)
>>> A = torch.randn(4, 4, dtype=torch.complex128) # Complex matrix
>>> Ainv = torch.linalg.inv(A)
>>> torch.dist(A @ Ainv, torch.eye(4))
tensor(7.5107e-16, dtype=torch.float64)
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
""")
solve_ex = _add_docstr(_linalg.linalg_solve_ex, r"""
linalg.solve_ex(A, B, *, left=True, check_errors=False, out=None) -> (Tensor, Tensor)
A version of :func:`~solve` that does not perform error checks unless :attr:`check_errors`\ `= True`.
It also returns the :attr:`info` tensor returned by `LAPACK's getrf`_.
""" + fr"""
.. note:: {common_notes["sync_note_ex"]}
.. warning:: {common_notes["experimental_warning"]}
""" + r"""
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
Keyword args:
left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
check_errors (bool, optional): controls whether to check the content of ``infos`` and raise
an error if it is non-zero. Default: `False`.
out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(result, info)`.
Examples::
>>> A = torch.randn(3, 3)
>>> Ainv, info = torch.linalg.solve_ex(A)
>>> torch.dist(torch.linalg.inv(A), Ainv)
tensor(0.)
>>> info
tensor(0, dtype=torch.int32)
.. _LAPACK's getrf:
https://www.netlib.org/lapack/explore-html/dd/d9a/group__double_g_ecomputational_ga0019443faea08275ca60a734d0593e60.html
""")
inv_ex = _add_docstr(_linalg.linalg_inv_ex, r"""
linalg.inv_ex(A, *, check_errors=False, out=None) -> (Tensor, Tensor)
Computes the inverse of a square matrix if it is invertible.
Returns a namedtuple ``(inverse, info)``. ``inverse`` contains the result of
inverting :attr:`A` and ``info`` stores the LAPACK error codes.
If :attr:`A` is not an invertible matrix, or if it's a batch of matrices
and one or more of them is not an invertible matrix,
then ``info`` stores a positive integer for the corresponding matrix.
The positive integer indicates the diagonal element of the LU decomposition of
the input matrix that is exactly zero.
``info`` filled with zeros indicates that the inversion was successful.
If ``check_errors=True`` and ``info`` contains positive integers, then a RuntimeError is thrown.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
""" + fr"""
.. note:: {common_notes["sync_note_ex"]}
.. warning:: {common_notes["experimental_warning"]}
""" + r"""
.. seealso::
:func:`torch.linalg.inv` is a NumPy compatible variant that always checks for errors.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of square matrices.
check_errors (bool, optional): controls whether to check the content of ``info``. Default: `False`.
Keyword args:
out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(3, 3)
>>> Ainv, info = torch.linalg.inv_ex(A)
>>> torch.dist(torch.linalg.inv(A), Ainv)
tensor(0.)
>>> info
tensor(0, dtype=torch.int32)
""")
det = _add_docstr(_linalg.linalg_det, r"""
linalg.det(A, *, out=None) -> Tensor
Computes the determinant of a square matrix.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
.. seealso::
:func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the
absolute value of the determinant of real-valued (resp. complex) square matrices.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(3, 3)
>>> torch.linalg.det(A)
tensor(0.0934)
>>> A = torch.randn(3, 2, 2)
>>> torch.linalg.det(A)
tensor([1.1990, 0.4099, 0.7386])
""")
slogdet = _add_docstr(_linalg.linalg_slogdet, r"""
linalg.slogdet(A, *, out=None) -> (Tensor, Tensor)
Computes the sign and natural logarithm of the absolute value of the determinant of a square matrix.
For complex :attr:`A`, it returns the angle and the natural logarithm of the modulus of the
determinant, that is, a logarithmic polar decomposition of the determinant.
The determinant can be recovered as `sign * exp(logabsdet)`.
When a matrix has a determinant of zero, it returns `(0, -inf)`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
.. seealso::
:func:`torch.linalg.det` computes the determinant of square matrices.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
Keyword args:
out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(sign, logabsdet)`.
`sign` will have the same dtype as :attr:`A`.
`logabsdet` will always be real-valued, even when :attr:`A` is complex.
Examples::
>>> A = torch.randn(3, 3)
>>> A
tensor([[ 0.0032, -0.2239, -1.1219],
[-0.6690, 0.1161, 0.4053],
[-1.6218, -0.9273, -0.0082]])
>>> torch.linalg.det(A)
tensor(-0.7576)
>>> torch.logdet(A)
tensor(nan)
>>> torch.linalg.slogdet(A)
torch.return_types.linalg_slogdet(sign=tensor(-1.), logabsdet=tensor(-0.2776))
""")
eig = _add_docstr(_linalg.linalg_eig, r"""
linalg.eig(A, *, out=None) -> (Tensor, Tensor)
Computes the eigenvalue decomposition of a square matrix if it exists.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **eigenvalue decomposition** of a square matrix
:math:`A \in \mathbb{K}^{n \times n}` (if it exists) is defined as
.. math::
A = V \operatorname{diag}(\Lambda) V^{-1}\mathrlap{\qquad V \in \mathbb{C}^{n \times n}, \Lambda \in \mathbb{C}^n}
This decomposition exists if and only if :math:`A` is `diagonalizable`_.
This is the case when all its eigenvalues are different.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
.. note:: The eigenvalues and eigenvectors of a real matrix may be complex.
""" + fr"""
.. note:: {common_notes["sync_note"]}
""" + r"""
.. warning:: This function assumes that :attr:`A` is `diagonalizable`_ (for example, when all the
eigenvalues are different). If it is not diagonalizable, the returned
eigenvalues will be correct but :math:`A \neq V \operatorname{diag}(\Lambda)V^{-1}`.
.. warning:: The returned eigenvectors are normalized to have norm `1`.
Even then, the eigenvectors of a matrix are not unique, nor are they continuous with respect to
:attr:`A`. Due to this lack of uniqueness, different hardware and software may compute
different eigenvectors.
This non-uniqueness is caused by the fact that multiplying an eigenvector by
:math:`e^{i \phi}, \phi \in \mathbb{R}` produces another set of valid eigenvectors
of the matrix. For this reason, the loss function shall not depend on the phase of the
eigenvectors, as this quantity is not well-defined.
This is checked when computing the gradients of this function. As such,
when inputs are on a CUDA device, the computation of the gradients
of this function synchronizes that device with the CPU.
.. warning:: Gradients computed using the `eigenvectors` tensor will only be finite when
:attr:`A` has distinct eigenvalues.
Furthermore, if the distance between any two eigenvalues is close to zero,
the gradient will be numerically unstable, as it depends on the eigenvalues
:math:`\lambda_i` through the computation of
:math:`\frac{1}{\min_{i \neq j} \lambda_i - \lambda_j}`.
.. seealso::
:func:`torch.linalg.eigvals` computes only the eigenvalues.
Unlike :func:`torch.linalg.eig`, the gradients of :func:`~eigvals` are always
numerically stable.
:func:`torch.linalg.eigh` for a (faster) function that computes the eigenvalue decomposition
for Hermitian and symmetric matrices.
:func:`torch.linalg.svd` for a function that computes another type of spectral
decomposition that works on matrices of any shape.
:func:`torch.linalg.qr` for another (much faster) decomposition that works on matrices of
any shape.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of diagonalizable matrices.
Keyword args:
out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(eigenvalues, eigenvectors)` which corresponds to :math:`\Lambda` and :math:`V` above.
`eigenvalues` and `eigenvectors` will always be complex-valued, even when :attr:`A` is real. The eigenvectors
will be given by the columns of `eigenvectors`.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> A
tensor([[ 0.9828+0.3889j, -0.4617+0.3010j],
[ 0.1662-0.7435j, -0.6139+0.0562j]], dtype=torch.complex128)
>>> L, V = torch.linalg.eig(A)
>>> L
tensor([ 1.1226+0.5738j, -0.7537-0.1286j], dtype=torch.complex128)
>>> V
tensor([[ 0.9218+0.0000j, 0.1882-0.2220j],
[-0.0270-0.3867j, 0.9567+0.0000j]], dtype=torch.complex128)
>>> torch.dist(V @ torch.diag(L) @ torch.linalg.inv(V), A)
tensor(7.7119e-16, dtype=torch.float64)
>>> A = torch.randn(3, 2, 2, dtype=torch.float64)
>>> L, V = torch.linalg.eig(A)
>>> torch.dist(V @ torch.diag_embed(L) @ torch.linalg.inv(V), A)
tensor(3.2841e-16, dtype=torch.float64)
.. _diagonalizable:
https://en.wikipedia.org/wiki/Diagonalizable_matrix#Definition
""")
eigvals = _add_docstr(_linalg.linalg_eigvals, r"""
linalg.eigvals(A, *, out=None) -> Tensor
Computes the eigenvalues of a square matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **eigenvalues** of a square matrix :math:`A \in \mathbb{K}^{n \times n}` are defined
as the roots (counted with multiplicity) of the polynomial `p` of degree `n` given by
.. math::
p(\lambda) = \operatorname{det}(A - \lambda \mathrm{I}_n)\mathrlap{\qquad \lambda \in \mathbb{C}}
where :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
.. note:: The eigenvalues of a real matrix may be complex, as the roots of a real polynomial may be complex.
The eigenvalues of a matrix are always well-defined, even when the matrix is not diagonalizable.
""" + fr"""
.. note:: {common_notes["sync_note"]}
""" + r"""
.. seealso::
:func:`torch.linalg.eig` computes the full eigenvalue decomposition.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Returns:
A complex-valued tensor containing the eigenvalues even when :attr:`A` is real.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> L = torch.linalg.eigvals(A)
>>> L
tensor([ 1.1226+0.5738j, -0.7537-0.1286j], dtype=torch.complex128)
>>> torch.dist(L, torch.linalg.eig(A).eigenvalues)
tensor(2.4576e-07)
""")
eigh = _add_docstr(_linalg.linalg_eigh, r"""
linalg.eigh(A, UPLO='L', *, out=None) -> (Tensor, Tensor)
Computes the eigenvalue decomposition of a complex Hermitian or real symmetric matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **eigenvalue decomposition** of a complex Hermitian or real symmetric matrix
:math:`A \in \mathbb{K}^{n \times n}` is defined as
.. math::
A = Q \operatorname{diag}(\Lambda) Q^{\text{H}}\mathrlap{\qquad Q \in \mathbb{K}^{n \times n}, \Lambda \in \mathbb{R}^n}
where :math:`Q^{\text{H}}` is the conjugate transpose when :math:`Q` is complex, and the transpose when :math:`Q` is real-valued.
:math:`Q` is orthogonal in the real case and unitary in the complex case.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
:attr:`A` is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead:
- If :attr:`UPLO`\ `= 'L'` (default), only the lower triangular part of the matrix is used in the computation.
- If :attr:`UPLO`\ `= 'U'`, only the upper triangular part of the matrix is used.
The eigenvalues are returned in ascending order.
""" + fr"""
.. note:: {common_notes["sync_note"]}
""" + r"""
.. note:: The eigenvalues of real symmetric or complex Hermitian matrices are always real.
.. warning:: The eigenvectors of a symmetric matrix are not unique, nor are they continuous with
respect to :attr:`A`. Due to this lack of uniqueness, different hardware and
software may compute different eigenvectors.
This non-uniqueness is caused by the fact that multiplying an eigenvector by
`-1` in the real case or by :math:`e^{i \phi}, \phi \in \mathbb{R}` in the complex
case produces another set of valid eigenvectors of the matrix.
For this reason, the loss function shall not depend on the phase of the eigenvectors, as
this quantity is not well-defined.
This is checked for complex inputs when computing the gradients of this function. As such,
when inputs are complex and are on a CUDA device, the computation of the gradients
of this function synchronizes that device with the CPU.
.. warning:: Gradients computed using the `eigenvectors` tensor will only be finite when
:attr:`A` has distinct eigenvalues.
Furthermore, if the distance between any two eigenvalues is close to zero,
the gradient will be numerically unstable, as it depends on the eigenvalues
:math:`\lambda_i` through the computation of
:math:`\frac{1}{\min_{i \neq j} \lambda_i - \lambda_j}`.
.. seealso::
:func:`torch.linalg.eigvalsh` computes only the eigenvalues of a Hermitian matrix.
Unlike :func:`torch.linalg.eigh`, the gradients of :func:`~eigvalsh` are always
numerically stable.
:func:`torch.linalg.cholesky` for a different decomposition of a Hermitian matrix.
The Cholesky decomposition gives less information about the matrix but is much faster
to compute than the eigenvalue decomposition.
:func:`torch.linalg.eig` for a (slower) function that computes the eigenvalue decomposition
of a not necessarily Hermitian square matrix.
:func:`torch.linalg.svd` for a (slower) function that computes the more general SVD
decomposition of matrices of any shape.
:func:`torch.linalg.qr` for another (much faster) decomposition that works on general
matrices.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of symmetric or Hermitian matrices.
UPLO ('L', 'U', optional): controls whether to use the upper or lower triangular part
of :attr:`A` in the computations. Default: `'L'`.
Keyword args:
out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(eigenvalues, eigenvectors)` which corresponds to :math:`\Lambda` and :math:`Q` above.
`eigenvalues` will always be real-valued, even when :attr:`A` is complex.
It will also be ordered in ascending order.
`eigenvectors` will have the same dtype as :attr:`A` and will contain the eigenvectors as its columns.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> A = A + A.T.conj() # creates a Hermitian matrix
>>> A
tensor([[2.9228+0.0000j, 0.2029-0.0862j],
[0.2029+0.0862j, 0.3464+0.0000j]], dtype=torch.complex128)
>>> L, Q = torch.linalg.eigh(A)
>>> L
tensor([0.3277, 2.9415], dtype=torch.float64)
>>> Q
tensor([[-0.0846+-0.0000j, -0.9964+0.0000j],
[ 0.9170+0.3898j, -0.0779-0.0331j]], dtype=torch.complex128)
>>> torch.dist(Q @ torch.diag(L.cdouble()) @ Q.T.conj(), A)
tensor(6.1062e-16, dtype=torch.float64)
>>> A = torch.randn(3, 2, 2, dtype=torch.float64)
>>> A = A + A.mT # creates a batch of symmetric matrices
>>> L, Q = torch.linalg.eigh(A)
>>> torch.dist(Q @ torch.diag_embed(L) @ Q.mH, A)
tensor(1.5423e-15, dtype=torch.float64)
""")
eigvalsh = _add_docstr(_linalg.linalg_eigvalsh, r"""
linalg.eigvalsh(A, UPLO='L', *, out=None) -> Tensor
Computes the eigenvalues of a complex Hermitian or real symmetric matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **eigenvalues** of a complex Hermitian or real symmetric matrix :math:`A \in \mathbb{K}^{n \times n}`
are defined as the roots (counted with multiplicity) of the polynomial `p` of degree `n` given by
.. math::
p(\lambda) = \operatorname{det}(A - \lambda \mathrm{I}_n)\mathrlap{\qquad \lambda \in \mathbb{R}}
where :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
The eigenvalues of a real symmetric or complex Hermitian matrix are always real.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The eigenvalues are returned in ascending order.
:attr:`A` is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead:
- If :attr:`UPLO`\ `= 'L'` (default), only the lower triangular part of the matrix is used in the computation.
- If :attr:`UPLO`\ `= 'U'`, only the upper triangular part of the matrix is used.
""" + fr"""
.. note:: {common_notes["sync_note"]}
""" + r"""
.. seealso::
:func:`torch.linalg.eigh` computes the full eigenvalue decomposition.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of symmetric or Hermitian matrices.
UPLO ('L', 'U', optional): controls whether to use the upper or lower triangular part
of :attr:`A` in the computations. Default: `'L'`.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Returns:
A real-valued tensor containing the eigenvalues even when :attr:`A` is complex.
The eigenvalues are returned in ascending order.
Examples::
>>> A = torch.randn(2, 2, dtype=torch.complex128)
>>> A = A + A.T.conj() # creates a Hermitian matrix
>>> A
tensor([[2.9228+0.0000j, 0.2029-0.0862j],
[0.2029+0.0862j, 0.3464+0.0000j]], dtype=torch.complex128)
>>> torch.linalg.eigvalsh(A)
tensor([0.3277, 2.9415], dtype=torch.float64)
>>> A = torch.randn(3, 2, 2, dtype=torch.float64)
>>> A = A + A.mT # creates a batch of symmetric matrices
>>> torch.linalg.eigvalsh(A)
tensor([[ 2.5797, 3.4629],
[-4.1605, 1.3780],
[-3.1113, 2.7381]], dtype=torch.float64)
""")
householder_product = _add_docstr(_linalg.linalg_householder_product, r"""
householder_product(A, tau, *, out=None) -> Tensor
Computes the first `n` columns of a product of Householder matrices.
Let :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, and
let :math:`V \in \mathbb{K}^{m \times n}` be a matrix with columns :math:`v_i \in \mathbb{K}^m`
for :math:`i=1,\ldots,n` with :math:`m \geq n`. Denote by :math:`w_i` the vector resulting from
zeroing out the first :math:`i-1` components of :math:`v_i` and setting to `1` the :math:`i`-th.
For a vector :math:`\tau \in \mathbb{K}^k` with :math:`k \leq n`, this function computes the
first :math:`n` columns of the matrix
.. math::
H_1H_2 ... H_k \qquad\text{with}\qquad H_i = \mathrm{I}_m - \tau_i w_i w_i^{\text{H}}
where :math:`\mathrm{I}_m` is the `m`-dimensional identity matrix and :math:`w^{\text{H}}` is the
conjugate transpose when :math:`w` is complex, and the transpose when :math:`w` is real-valued.
The output matrix is the same size as the input matrix :attr:`A`.
See `Representation of Orthogonal or Unitary Matrices`_ for further details.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
.. seealso::
:func:`torch.geqrf` can be used together with this function to form the `Q` from the
:func:`~qr` decomposition.
:func:`torch.ormqr` is a related function that computes the matrix multiplication
of a product of Householder matrices with another matrix.
However, that function is not supported by autograd.
.. warning::
Gradient computations are only well-defined if :math:`\tau_i \neq \frac{1}{||v_i||^2}`.
If this condition is not met, no error will be thrown, but the gradient produced may contain `NaN`.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
tau (Tensor): tensor of shape `(*, k)` where `*` is zero or more batch dimensions.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if :attr:`A` doesn't satisfy the requirement `m >= n`,
or :attr:`tau` doesn't satisfy the requirement `n >= k`.
Examples::
>>> A = torch.randn(2, 2)
>>> h, tau = torch.geqrf(A)
>>> Q = torch.linalg.householder_product(h, tau)
>>> torch.dist(Q, torch.linalg.qr(A).Q)
tensor(0.)
>>> h = torch.randn(3, 2, 2, dtype=torch.complex128)
>>> tau = torch.randn(3, 1, dtype=torch.complex128)
>>> Q = torch.linalg.householder_product(h, tau)
>>> Q
tensor([[[ 1.8034+0.4184j, 0.2588-1.0174j],
[-0.6853+0.7953j, 2.0790+0.5620j]],
[[ 1.4581+1.6989j, -1.5360+0.1193j],
[ 1.3877-0.6691j, 1.3512+1.3024j]],
[[ 1.4766+0.5783j, 0.0361+0.6587j],
[ 0.6396+0.1612j, 1.3693+0.4481j]]], dtype=torch.complex128)
.. _Representation of Orthogonal or Unitary Matrices:
https://www.netlib.org/lapack/lug/node128.html
""")
ldl_factor = _add_docstr(_linalg.linalg_ldl_factor, r"""
linalg.ldl_factor(A, *, hermitian=False, out=None) -> (Tensor, Tensor)
Computes a compact representation of the LDL factorization of a Hermitian or symmetric (possibly indefinite) matrix.
When :attr:`A` is complex valued it can be Hermitian (:attr:`hermitian`\ `= True`)
or symmetric (:attr:`hermitian`\ `= False`).
The factorization is of the form :math:`A = L D L^T`.
If :attr:`hermitian` is `True`, the transpose operation is the conjugate transpose.
:math:`L` (or :math:`U`) and :math:`D` are stored in compact form in ``LD``.
They follow the format specified by `LAPACK's sytrf`_ function.
These tensors may be used in :func:`torch.linalg.ldl_solve` to solve linear systems.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
""" + fr"""
.. note:: {common_notes["sync_note_has_ex"].format("torch.linalg.ldl_factor_ex")}
""" + r"""
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of symmetric or Hermitian matrices.
Keyword args:
hermitian (bool, optional): whether to consider the input to be Hermitian or symmetric.
For real-valued matrices, this switch has no effect. Default: `False`.
out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(LD, pivots)`.
Examples::
>>> A = torch.randn(3, 3)
>>> A = A @ A.mT # make symmetric
>>> A
tensor([[7.2079, 4.2414, 1.9428],
[4.2414, 3.4554, 0.3264],
[1.9428, 0.3264, 1.3823]])
>>> LD, pivots = torch.linalg.ldl_factor(A)
>>> LD
tensor([[ 7.2079, 0.0000, 0.0000],
[ 0.5884, 0.9595, 0.0000],
[ 0.2695, -0.8513, 0.1633]])
>>> pivots
tensor([1, 2, 3], dtype=torch.int32)
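As an illustrative sketch, the factorization can be reused with :func:`torch.linalg.ldl_solve`
(the `True` output holds up to numerical error for a random, almost surely invertible matrix)::
>>> A = torch.randn(3, 3, dtype=torch.float64)
>>> A = A + A.mT # make symmetric (possibly indefinite)
>>> LD, pivots = torch.linalg.ldl_factor(A)
>>> B = torch.randn(3, 4, dtype=torch.float64)
>>> X = torch.linalg.ldl_solve(LD, pivots, B)
>>> torch.allclose(A @ X, B)
True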
.. _LAPACK's sytrf:
https://www.netlib.org/lapack/explore-html/d3/db6/group__double_s_ycomputational_gad91bde1212277b3e909eb6af7f64858a.html
""")
ldl_factor_ex = _add_docstr(_linalg.linalg_ldl_factor_ex, r"""
linalg.ldl_factor_ex(A, *, hermitian=False, check_errors=False, out=None) -> (Tensor, Tensor, Tensor)
This is a version of :func:`~ldl_factor` that does not perform error checks unless :attr:`check_errors`\ `= True`.
It also returns the :attr:`info` tensor returned by `LAPACK's sytrf`_.
``info`` stores integer error codes from the backend library.
A positive integer indicates the diagonal element of :math:`D` that is zero.
Division by 0 will occur if the result is used for solving a system of linear equations.
``info`` filled with zeros indicates that the factorization was successful.
If ``check_errors=True`` and ``info`` contains positive integers, then a `RuntimeError` is thrown.
""" + fr"""
.. note:: {common_notes["sync_note_ex"]}
.. warning:: {common_notes["experimental_warning"]}
""" + r"""
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
consisting of symmetric or Hermitian matrices.
Keyword args:
hermitian (bool, optional): whether to consider the input to be Hermitian or symmetric.
For real-valued matrices, this switch has no effect. Default: `False`.
check_errors (bool, optional): controls whether to check the content of ``info`` and raise
an error if it is non-zero. Default: `False`.
out (tuple, optional): tuple of three tensors to write the output to. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(LD, pivots, info)`.
Examples::
>>> A = torch.randn(3, 3)
>>> A = A @ A.mT # make symmetric
>>> A
tensor([[7.2079, 4.2414, 1.9428],
[4.2414, 3.4554, 0.3264],
[1.9428, 0.3264, 1.3823]])
>>> LD, pivots, info = torch.linalg.ldl_factor_ex(A)
>>> LD
tensor([[ 7.2079, 0.0000, 0.0000],
[ 0.5884, 0.9595, 0.0000],
[ 0.2695, -0.8513, 0.1633]])
>>> pivots
tensor([1, 2, 3], dtype=torch.int32)
>>> info
tensor(0, dtype=torch.int32)
.. _LAPACK's sytrf:
https://www.netlib.org/lapack/explore-html/d3/db6/group__double_s_ycomputational_gad91bde1212277b3e909eb6af7f64858a.html
""")
ldl_solve = _add_docstr(_linalg.linalg_ldl_solve, r"""
linalg.ldl_solve(LD, pivots, B, *, hermitian=False, out=None) -> Tensor
Computes the solution of a system of linear equations using the LDL factorization.
:attr:`LD` and :attr:`pivots` are the compact representation of the LDL factorization and
are expected to be computed by :func:`torch.linalg.ldl_factor_ex`.
The :attr:`hermitian` argument to this function should be the same
as the corresponding argument in :func:`torch.linalg.ldl_factor_ex`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`LD` is a batch of matrices then
the output has the same batch dimensions.
""" + fr"""
.. warning:: {common_notes["experimental_warning"]}
""" + r"""
Args:
LD (Tensor): the `n \times n` matrix or the batch of such matrices of size
`(*, n, n)` where `*` is one or more batch dimensions.
pivots (Tensor): the pivots corresponding to the LDL factorization of :attr:`LD`.
B (Tensor): right-hand side tensor of shape `(*, n, k)`.
Keyword args:
hermitian (bool, optional): whether to consider the decomposed matrix to be Hermitian or symmetric.
For real-valued matrices, this switch has no effect. Default: `False`.
out (Tensor, optional): output tensor. `B` may be passed as `out` and the result is computed in-place on `B`.
Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(2, 3, 3)
>>> A = A @ A.mT # make symmetric
>>> LD, pivots, info = torch.linalg.ldl_factor_ex(A)
>>> B = torch.randn(2, 3, 4)
>>> X = torch.linalg.ldl_solve(LD, pivots, B)
>>> torch.linalg.norm(A @ X - B)
tensor(0.0001)
""")
lstsq = _add_docstr(_linalg.linalg_lstsq, r"""
torch.linalg.lstsq(A, B, rcond=None, *, driver=None) -> (Tensor, Tensor, Tensor, Tensor)
Computes a solution to the least squares problem of a system of linear equations.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **least squares problem** for a linear system :math:`AX = B` with
:math:`A \in \mathbb{K}^{m \times n}, B \in \mathbb{K}^{m \times k}` is defined as
.. math::
\min_{X \in \mathbb{K}^{n \times k}} \|AX - B\|_F
where :math:`\|-\|_F` denotes the Frobenius norm.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
:attr:`driver` chooses the LAPACK/MAGMA function that will be used.
For CPU inputs the valid values are `'gels'`, `'gelsy'`, `'gelsd'`, `'gelss'`.
For CUDA input, the only valid driver is `'gels'`, which assumes that :attr:`A` is full-rank.
To choose the best driver on CPU consider:
- If :attr:`A` is well-conditioned (its `condition number`_ is not too large), or you do not mind some precision loss.
- For a general matrix: `'gelsy'` (QR with pivoting) (default)
- If :attr:`A` is full-rank: `'gels'` (QR)
- If :attr:`A` is not well-conditioned.
- `'gelsd'` (tridiagonal reduction and SVD)
- But if you run into memory issues: `'gelss'` (full SVD).
See also the `full description of these drivers`_
:attr:`rcond` is used to determine the effective rank of the matrices in :attr:`A`
when :attr:`driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`).
In this case, if :math:`\sigma_i` are the singular values of `A` in decreasing order,
:math:`\sigma_i` will be rounded down to zero if :math:`\sigma_i \leq \text{rcond} \cdot \sigma_1`.
If :attr:`rcond`\ `= None` (default), :attr:`rcond` is set to the machine precision of the dtype of :attr:`A` times `max(m, n)`.
This function returns the solution to the problem and some extra information in a named tuple of
four tensors `(solution, residuals, rank, singular_values)`. For inputs :attr:`A`, :attr:`B`
of shape `(*, m, n)`, `(*, m, k)` respectively, it contains
- `solution`: the least squares solution. It has shape `(*, n, k)`.
- `residuals`: the squared residuals of the solutions, that is, :math:`\|AX - B\|_F^2`.
It has shape equal to the batch dimensions of :attr:`A`.
It is computed when `m > n` and every matrix in :attr:`A` is full-rank,
otherwise, it is an empty tensor.
If :attr:`A` is a batch of matrices and any matrix in the batch is not full rank,
then an empty tensor is returned. This behavior may change in a future PyTorch release.
- `rank`: tensor of ranks of the matrices in :attr:`A`.
It has shape equal to the batch dimensions of :attr:`A`.
It is computed when :attr:`driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`),
otherwise it is an empty tensor.
- `singular_values`: tensor of singular values of the matrices in :attr:`A`.
It has shape `(*, min(m, n))`.
It is computed when :attr:`driver` is one of (`'gelsd'`, `'gelss'`),
otherwise it is an empty tensor.
.. note::
This function computes `X = \ `:attr:`A`\ `.pinverse() @ \ `:attr:`B` in a faster and
more numerically stable way than performing the computations separately.
.. warning::
The default value of :attr:`rcond` may change in a future PyTorch release.
It is therefore recommended to use a fixed value to avoid potential
breaking changes.
Args:
A (Tensor): lhs tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
B (Tensor): rhs tensor of shape `(*, m, k)` where `*` is zero or more batch dimensions.
rcond (float, optional): used to determine the effective rank of :attr:`A`.
If :attr:`rcond`\ `= None`, :attr:`rcond` is set to the machine
precision of the dtype of :attr:`A` times `max(m, n)`. Default: `None`.
Keyword args:
driver (str, optional): name of the LAPACK/MAGMA method to be used.
If `None`, `'gelsy'` is used for CPU inputs and `'gels'` for CUDA inputs.
Default: `None`.
Returns:
A named tuple `(solution, residuals, rank, singular_values)`.
Examples::
>>> A = torch.tensor([[[10, 2, 3], [3, 10, 5], [5, 6, 12]]], dtype=torch.float) # shape (1, 3, 3)
>>> B = torch.tensor([[[2, 5, 1], [3, 2, 1], [5, 1, 9]],
[[4, 2, 9], [2, 0, 3], [2, 5, 3]]], dtype=torch.float) # shape (2, 3, 3)
>>> X = torch.linalg.lstsq(A, B).solution # A is broadcasted to shape (2, 3, 3)
>>> torch.dist(X, torch.linalg.pinv(A) @ B)
tensor(2.0862e-07)
>>> S = torch.linalg.lstsq(A, B, driver='gelsd').singular_values
>>> torch.dist(S, torch.linalg.svdvals(A))
tensor(5.7220e-06)
>>> A[:, 0].zero_() # Decrease the rank of A
>>> rank = torch.linalg.lstsq(A, B).rank
>>> rank
tensor([2])
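As an illustrative check (`True` holds up to numerical error), the returned solution
satisfies the normal equations :math:`A^{\text{H}}(AX - B) = 0`::
>>> A = torch.randn(5, 3, dtype=torch.float64)
>>> B = torch.randn(5, 2, dtype=torch.float64)
>>> X = torch.linalg.lstsq(A, B).solution
>>> torch.allclose(A.mT @ (A @ X - B), torch.zeros(3, 2, dtype=torch.float64), atol=1e-10)
True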
.. _condition number:
https://pytorch.org/docs/master/linalg.html#torch.linalg.cond
.. _full description of these drivers:
https://www.netlib.org/lapack/lug/node27.html
""")
matrix_power = _add_docstr(_linalg.linalg_matrix_power, r"""
matrix_power(A, n, *, out=None) -> Tensor
Computes the `n`-th power of a square matrix for an integer `n`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
If :attr:`n`\ `= 0`, it returns the identity matrix (or batch) of the same shape
as :attr:`A`. If :attr:`n` is negative, it returns the inverse of each matrix
(if invertible) raised to the power of `abs(n)`.
.. note::
Consider using :func:`torch.linalg.solve` if possible for multiplying a matrix on the left by
a negative power as, if :attr:`n`\ `> 0`::
torch.linalg.solve(matrix_power(A, n), B) == matrix_power(A, -n) @ B
It is always preferred to use :func:`~solve` when possible, as it is faster and more
numerically stable than computing :math:`A^{-n}` explicitly.
.. seealso::
:func:`torch.linalg.solve` computes :attr:`A`\ `.inverse() @ \ `:attr:`B` with a
numerically stable algorithm.
Args:
A (Tensor): tensor of shape `(*, m, m)` where `*` is zero or more batch dimensions.
n (int): the exponent.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if :attr:`n`\ `< 0` and the matrix :attr:`A` or any matrix in the
batch of matrices :attr:`A` is not invertible.
Examples::
>>> A = torch.randn(3, 3)
>>> torch.linalg.matrix_power(A, 0)
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> torch.linalg.matrix_power(A, 3)
tensor([[ 1.0756, 0.4980, 0.0100],
[-1.6617, 1.4994, -1.9980],
[-0.4509, 0.2731, 0.8001]])
>>> torch.linalg.matrix_power(A.expand(2, -1, -1), -2)
tensor([[[ 0.2640, 0.4571, -0.5511],
[-1.0163, 0.3491, -1.5292],
[-0.4899, 0.0822, 0.2773]],
[[ 0.2640, 0.4571, -0.5511],
[-1.0163, 0.3491, -1.5292],
[-0.4899, 0.0822, 0.2773]]])
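An illustrative check of the identity from the note above (`True` holds up to numerical error
for a random, almost surely invertible matrix)::
>>> A = torch.randn(3, 3, dtype=torch.float64)
>>> B = torch.randn(3, 3, dtype=torch.float64)
>>> X = torch.linalg.solve(torch.linalg.matrix_power(A, 2), B)
>>> torch.allclose(X, torch.linalg.matrix_power(A, -2) @ B)
True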
""")
matrix_rank = _add_docstr(_linalg.linalg_matrix_rank, r"""
linalg.matrix_rank(A, *, atol=None, rtol=None, hermitian=False, out=None) -> Tensor
Computes the numerical rank of a matrix.
The matrix rank is computed as the number of singular values
(or eigenvalues in absolute value when :attr:`hermitian`\ `= True`)
that are greater than :math:`\max(\text{atol}, \sigma_1 * \text{rtol})` threshold,
where :math:`\sigma_1` is the largest singular value (or eigenvalue).
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
If :attr:`hermitian`\ `= True`, :attr:`A` is assumed to be Hermitian if complex or
symmetric if real, but this is not checked internally. Instead, just the lower
triangular part of the matrix is used in the computations.
If :attr:`rtol` is not specified and :attr:`A` is a matrix of dimensions `(m, n)`,
the relative tolerance is set to be :math:`\text{rtol} = \max(m, n) \varepsilon`
where :math:`\varepsilon` is the epsilon value for the dtype of :attr:`A` (see :class:`.finfo`).
If :attr:`rtol` is not specified and :attr:`atol` is specified to be larger than zero then
:attr:`rtol` is set to zero.
If :attr:`atol` or :attr:`rtol` is a :class:`torch.Tensor`, its shape must be broadcastable to that
of the singular values of :attr:`A` as returned by :func:`torch.linalg.svdvals`.
.. note::
This function has a NumPy compatible variant `linalg.matrix_rank(A, tol, hermitian=False)`.
However, use of the positional argument :attr:`tol` is deprecated in favor of :attr:`atol` and :attr:`rtol`.
""" + fr"""
.. note:: The matrix rank is computed using a singular value decomposition
:func:`torch.linalg.svdvals` if :attr:`hermitian`\ `= False` (default) and the eigenvalue
decomposition :func:`torch.linalg.eigvalsh` when :attr:`hermitian`\ `= True`.
{common_notes["sync_note"]}
""" + r"""
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
tol (float, Tensor, optional): [NumPy Compat] Alias for :attr:`atol`. Default: `None`.
Keyword args:
atol (float, Tensor, optional): the absolute tolerance value. When `None` it's considered to be zero.
Default: `None`.
rtol (float, Tensor, optional): the relative tolerance value. See above for the value it takes when `None`.
Default: `None`.
hermitian (bool, optional): indicates whether :attr:`A` is Hermitian if complex
or symmetric if real. Default: `False`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.eye(10)
>>> torch.linalg.matrix_rank(A)
tensor(10)
>>> B = torch.eye(10)
>>> B[0, 0] = 0
>>> torch.linalg.matrix_rank(B)
tensor(9)
>>> A = torch.randn(4, 3, 2)
>>> torch.linalg.matrix_rank(A)
tensor([2, 2, 2, 2])
>>> A = torch.randn(2, 4, 2, 3)
>>> torch.linalg.matrix_rank(A)
tensor([[2, 2, 2, 2],
[2, 2, 2, 2]])
>>> A = torch.randn(2, 4, 3, 3, dtype=torch.complex64)
>>> torch.linalg.matrix_rank(A)
tensor([[3, 3, 3, 3],
[3, 3, 3, 3]])
>>> torch.linalg.matrix_rank(A, hermitian=True)
tensor([[3, 3, 3, 3],
[3, 3, 3, 3]])
>>> torch.linalg.matrix_rank(A, atol=1.0, rtol=0.0)
tensor([[3, 2, 2, 2],
[1, 2, 1, 2]])
>>> torch.linalg.matrix_rank(A, atol=1.0, rtol=0.0, hermitian=True)
tensor([[2, 2, 2, 1],
[1, 2, 2, 2]])
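An illustrative sketch of the thresholding rule above in terms of :func:`torch.linalg.svdvals`
(with probability one no singular value lies exactly at the threshold)::
>>> A = torch.randn(4, 6, dtype=torch.float64)
>>> S = torch.linalg.svdvals(A)
>>> torch.linalg.matrix_rank(A, atol=0.5, rtol=0.0) == (S > 0.5).sum()
tensor(True)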
""")
norm = _add_docstr(_linalg.linalg_norm, r"""
linalg.norm(A, ord=None, dim=None, keepdim=False, *, out=None, dtype=None) -> Tensor
Computes a vector or matrix norm.
Supports input of float, double, cfloat and cdouble dtypes.
Whether this function computes a vector or matrix norm is determined as follows:
- If :attr:`dim` is an `int`, the vector norm will be computed.
- If :attr:`dim` is a `2`-`tuple`, the matrix norm will be computed.
- If :attr:`dim`\ `= None` and :attr:`ord`\ `= None`,
:attr:`A` will be flattened to 1D and the `2`-norm of the resulting vector will be computed.
- If :attr:`dim`\ `= None` and :attr:`ord` `!= None`, :attr:`A` must be 1D or 2D.
:attr:`ord` defines the norm that is computed. The following norms are supported:
====================== ========================= ========================================================
:attr:`ord` norm for matrices norm for vectors
====================== ========================= ========================================================
`None` (default) Frobenius norm `2`-norm (see below)
`'fro'` Frobenius norm -- not supported --
`'nuc'` nuclear norm -- not supported --
`inf` `max(sum(abs(x), dim=1))` `max(abs(x))`
`-inf` `min(sum(abs(x), dim=1))` `min(abs(x))`
`0` -- not supported -- `sum(x != 0)`
`1` `max(sum(abs(x), dim=0))` as below
`-1` `min(sum(abs(x), dim=0))` as below
`2` largest singular value as below
`-2` smallest singular value as below
other `int` or `float` -- not supported -- `sum(abs(x)^{ord})^{(1 / ord)}`
====================== ========================= ========================================================
where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
.. seealso::
:func:`torch.linalg.vector_norm` computes a vector norm.
:func:`torch.linalg.matrix_norm` computes a matrix norm.
The above functions are often clearer and more flexible than using :func:`torch.linalg.norm`.
For example, `torch.linalg.norm(A, ord=1, dim=(0, 1))` always
computes a matrix norm, but with `torch.linalg.vector_norm(A, ord=1, dim=(0, 1))` it is possible
to compute a vector norm over the two dimensions.
Args:
A (Tensor): tensor of shape `(*, n)` or `(*, m, n)` where `*` is zero or more batch dimensions
ord (int, float, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `None`
dim (int, Tuple[int], optional): dimensions over which to compute
the vector or matrix norm. See above for the behavior when :attr:`dim`\ `= None`.
Default: `None`
keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
in the result as dimensions with size one. Default: `False`
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
dtype (:class:`torch.dtype`, optional): If specified, the input tensor is cast to
:attr:`dtype` before performing the operation, and the returned tensor's type
will be :attr:`dtype`. Default: `None`
Returns:
A real-valued tensor, even when :attr:`A` is complex.
Examples::
>>> from torch import linalg as LA
>>> a = torch.arange(9, dtype=torch.float) - 4
>>> a
tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.])
>>> B = a.reshape((3, 3))
>>> B
tensor([[-4., -3., -2.],
[-1., 0., 1.],
[ 2., 3., 4.]])
>>> LA.norm(a)
tensor(7.7460)
>>> LA.norm(B)
tensor(7.7460)
>>> LA.norm(B, 'fro')
tensor(7.7460)
>>> LA.norm(a, float('inf'))
tensor(4.)
>>> LA.norm(B, float('inf'))
tensor(9.)
>>> LA.norm(a, -float('inf'))
tensor(0.)
>>> LA.norm(B, -float('inf'))
tensor(2.)
>>> LA.norm(a, 1)
tensor(20.)
>>> LA.norm(B, 1)
tensor(7.)
>>> LA.norm(a, -1)
tensor(0.)
>>> LA.norm(B, -1)
tensor(6.)
>>> LA.norm(a, 2)
tensor(7.7460)
>>> LA.norm(B, 2)
tensor(7.3485)
>>> LA.norm(a, -2)
tensor(0.)
>>> LA.norm(B.double(), -2)
tensor(1.8570e-16, dtype=torch.float64)
>>> LA.norm(a, 3)
tensor(5.8480)
>>> LA.norm(a, -3)
tensor(0.)
Using the :attr:`dim` argument to compute vector norms::
>>> c = torch.tensor([[1., 2., 3.],
... [-1, 1, 4]])
>>> LA.norm(c, dim=0)
tensor([1.4142, 2.2361, 5.0000])
>>> LA.norm(c, dim=1)
tensor([3.7417, 4.2426])
>>> LA.norm(c, ord=1, dim=1)
tensor([6., 6.])
Using the :attr:`dim` argument to compute matrix norms::
>>> A = torch.arange(8, dtype=torch.float).reshape(2, 2, 2)
>>> LA.norm(A, dim=(1,2))
tensor([ 3.7417, 11.2250])
>>> LA.norm(A[0, :, :]), LA.norm(A[1, :, :])
(tensor(3.7417), tensor(11.2250))
""")
vector_norm = _add_docstr(_linalg.linalg_vector_norm, r"""
linalg.vector_norm(x, ord=2, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
Computes a vector norm.
If :attr:`x` is complex valued, it computes the norm of :attr:`x`\ `.abs()`.
Supports input of float, double, cfloat and cdouble dtypes.
This function does not necessarily treat multidimensional :attr:`x` as a batch of
vectors; instead:
- If :attr:`dim`\ `= None`, :attr:`x` will be flattened before the norm is computed.
- If :attr:`dim` is an `int` or a `tuple`, the norm will be computed over these dimensions
and the other dimensions will be treated as batch dimensions.
This behavior is for consistency with :func:`torch.linalg.norm`.
:attr:`ord` defines the vector norm that is computed. The following norms are supported:
====================== ===============================
:attr:`ord` vector norm
====================== ===============================
`2` (default) `2`-norm (see below)
`inf` `max(abs(x))`
`-inf` `min(abs(x))`
`0` `sum(x != 0)`
other `int` or `float` `sum(abs(x)^{ord})^{(1 / ord)}`
====================== ===============================
where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
:attr:`dtype` may be used to perform the computation in a more precise dtype.
It is semantically equivalent to calling ``linalg.vector_norm(x.to(dtype))``
but it is faster in some cases.
.. seealso::
:func:`torch.linalg.matrix_norm` computes a matrix norm.
Args:
x (Tensor): tensor, flattened by default, but this behavior can be
controlled using :attr:`dim`.
ord (int, float, inf, -inf, optional): order of norm. Default: `2`
dim (int, Tuple[int], optional): dimensions over which to compute
the norm. See above for the behavior when :attr:`dim`\ `= None`.
Default: `None`
keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
in the result as dimensions with size one. Default: `False`
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
dtype (:class:`torch.dtype`, optional): type used to perform the accumulation and the return.
If specified, :attr:`x` is cast to :attr:`dtype` before performing the operation,
and the returned tensor’s type will be :attr:`dtype` if real and of its real counterpart if complex.
:attr:`dtype` may be complex if :attr:`x` is complex, otherwise it must be real.
:attr:`x` should be convertible without narrowing to :attr:`dtype`. Default: None
Returns:
A real-valued tensor, even when :attr:`x` is complex.
Examples::
>>> from torch import linalg as LA
>>> a = torch.arange(9, dtype=torch.float) - 4
>>> a
tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.])
>>> B = a.reshape((3, 3))
>>> B
tensor([[-4., -3., -2.],
[-1., 0., 1.],
[ 2., 3., 4.]])
>>> LA.vector_norm(a, ord=3.5)
tensor(5.4345)
>>> LA.vector_norm(B, ord=3.5)
tensor(5.4345)
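Illustrative sketches of the :attr:`dim` and :attr:`dtype` behavior described above (only
shape and `True` outputs are shown, since the inputs are random)::
>>> x = torch.randn(2, 3)
>>> LA.vector_norm(x, dim=1).shape # one norm per row
torch.Size([2])
>>> torch.allclose(LA.vector_norm(x, dtype=torch.float64), LA.vector_norm(x.to(torch.float64)))
True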
""")
matrix_norm = _add_docstr(_linalg.linalg_matrix_norm, r"""
linalg.matrix_norm(A, ord='fro', dim=(-2, -1), keepdim=False, *, dtype=None, out=None) -> Tensor
Computes a matrix norm.
If :attr:`A` is complex valued, it computes the norm of :attr:`A`\ `.abs()`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices: the norm will be computed over the
dimensions specified by the 2-tuple :attr:`dim` and the other dimensions will
be treated as batch dimensions. The output will have the same batch dimensions.
:attr:`ord` defines the matrix norm that is computed. The following norms are supported:
====================== ========================================================
:attr:`ord` matrix norm
====================== ========================================================
`'fro'` (default) Frobenius norm
`'nuc'` nuclear norm
`inf` `max(sum(abs(x), dim=1))`
`-inf` `min(sum(abs(x), dim=1))`
`1` `max(sum(abs(x), dim=0))`
`-1` `min(sum(abs(x), dim=0))`
`2` largest singular value
`-2` smallest singular value
====================== ========================================================
where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
Args:
A (Tensor): tensor with two or more dimensions. By default its
shape is interpreted as `(*, m, n)` where `*` is zero or more
batch dimensions, but this behavior can be controlled using :attr:`dim`.
ord (int, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `'fro'`
dim (Tuple[int, int], optional): dimensions over which to compute the norm. Default: `(-2, -1)`
keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
in the result as dimensions with size one. Default: `False`
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
dtype (:class:`torch.dtype`, optional): If specified, the input tensor is cast to
:attr:`dtype` before performing the operation, and the returned tensor's type
will be :attr:`dtype`. Default: `None`
Returns:
A real-valued tensor, even when :attr:`A` is complex.
Examples::
>>> from torch import linalg as LA
>>> A = torch.arange(9, dtype=torch.float).reshape(3, 3)
>>> A
tensor([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> LA.matrix_norm(A)
tensor(14.2829)
>>> LA.matrix_norm(A, ord=-1)
tensor(9.)
>>> B = A.expand(2, -1, -1)
>>> B
tensor([[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]],
[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]])
>>> LA.matrix_norm(B)
tensor([14.2829, 14.2829])
>>> LA.matrix_norm(B, dim=(0, 2))
tensor([ 3.1623, 10.0000, 17.2627])
""")
matmul = _add_docstr(_linalg.linalg_matmul, r"""
linalg.matmul(input, other, *, out=None) -> Tensor
Alias for :func:`torch.matmul`
""")
diagonal = _add_docstr(_linalg.linalg_diagonal, r"""
linalg.diagonal(A, *, offset=0, dim1=-2, dim2=-1) -> Tensor
Alias for :func:`torch.diagonal` with defaults :attr:`dim1`\ `= -2`, :attr:`dim2`\ `= -1`.
""")
multi_dot = _add_docstr(_linalg.linalg_multi_dot, r"""
linalg.multi_dot(tensors, *, out=None)
Efficiently multiplies two or more matrices by reordering the multiplications so that
the fewest arithmetic operations are performed.
Supports inputs of float, double, cfloat and cdouble dtypes.
This function does not support batched inputs.
Every tensor in :attr:`tensors` must be 2D, except for the first and last which
may be 1D. If the first tensor is a 1D vector of shape `(n,)` it is treated as a row vector
of shape `(1, n)`. Similarly, if the last tensor is a 1D vector of shape `(n,)` it is treated
as a column vector of shape `(n, 1)`.
If the first and last tensors are matrices, the output will be a matrix.
However, if either is a 1D vector, then the output will be a 1D vector.
Differences with `numpy.linalg.multi_dot`:
- Unlike `numpy.linalg.multi_dot`, the first and last tensors must either be 1D or 2D
whereas NumPy allows them to be nD
.. warning:: This function does not broadcast.
.. note:: This function is implemented by chaining :func:`torch.mm` calls after
computing the optimal matrix multiplication order.
.. note:: The cost of multiplying two matrices with shapes `(a, b)` and `(b, c)` is
`a * b * c`. Given matrices `A`, `B`, `C` with shapes `(10, 100)`,
`(100, 5)`, `(5, 50)` respectively, we can calculate the cost of different
multiplication orders as follows:
.. math::
\begin{align*}
\operatorname{cost}((AB)C) &= 10 \times 100 \times 5 + 10 \times 5 \times 50 = 7500 \\
\operatorname{cost}(A(BC)) &= 10 \times 100 \times 50 + 100 \times 5 \times 50 = 75000
\end{align*}
In this case, multiplying `A` and `B` first followed by `C` is 10 times faster.
Args:
tensors (Sequence[Tensor]): two or more tensors to multiply. The first and last
tensors may be 1D or 2D. Every other tensor must be 2D.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> from torch.linalg import multi_dot
>>> multi_dot([torch.tensor([1, 2]), torch.tensor([2, 3])])
tensor(8)
>>> multi_dot([torch.tensor([[1, 2]]), torch.tensor([2, 3])])
tensor([8])
>>> multi_dot([torch.tensor([[1, 2]]), torch.tensor([[2], [3]])])
tensor([[8]])
>>> A = torch.arange(2 * 3).view(2, 3)
>>> B = torch.arange(3 * 2).view(3, 2)
>>> C = torch.arange(2 * 2).view(2, 2)
>>> multi_dot((A, B, C))
tensor([[ 26, 49],
[ 80, 148]])
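An illustrative check, using the shapes from the note above, that the reordered product
agrees with a plain chain of matrix multiplications (up to numerical error)::
>>> A = torch.randn(10, 100, dtype=torch.float64)
>>> B = torch.randn(100, 5, dtype=torch.float64)
>>> C = torch.randn(5, 50, dtype=torch.float64)
>>> torch.allclose(multi_dot([A, B, C]), A @ B @ C)
True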
""")
svd = _add_docstr(_linalg.linalg_svd, r"""
linalg.svd(A, full_matrices=True, *, driver=None, out=None) -> (Tensor, Tensor, Tensor)
Computes the singular value decomposition (SVD) of a matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **full SVD** of a matrix
:math:`A \in \mathbb{K}^{m \times n}`, if `k = min(m,n)`, is defined as
.. math::
A = U \operatorname{diag}(S) V^{\text{H}}
\mathrlap{\qquad U \in \mathbb{K}^{m \times m}, S \in \mathbb{R}^k, V \in \mathbb{K}^{n \times n}}
where :math:`\operatorname{diag}(S) \in \mathbb{K}^{m \times n}`,
:math:`V^{\text{H}}` is the conjugate transpose when :math:`V` is complex, and the transpose when :math:`V` is real-valued.
The matrices :math:`U`, :math:`V` (and thus :math:`V^{\text{H}}`) are orthogonal in the real case, and unitary in the complex case.
When `m > n` (resp. `m < n`) we can drop the last `m - n` (resp. `n - m`) columns of `U` (resp. `V`) to form the **reduced SVD**:
.. math::
A = U \operatorname{diag}(S) V^{\text{H}}
\mathrlap{\qquad U \in \mathbb{K}^{m \times k}, S \in \mathbb{R}^k, V \in \mathbb{K}^{k \times n}}
where :math:`\operatorname{diag}(S) \in \mathbb{K}^{k \times k}`.
In this case, :math:`U` and :math:`V` also have orthonormal columns.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The returned decomposition is a named tuple `(U, S, Vh)`
which corresponds to :math:`U`, :math:`S`, :math:`V^{\text{H}}` above.
The singular values are returned in descending order.
The parameter :attr:`full_matrices` chooses between the full (default) and reduced SVD.
The :attr:`driver` kwarg may be used in CUDA with a cuSOLVER backend to choose the algorithm used to compute the SVD.
The choice of a driver is a trade-off between accuracy and speed.
- If :attr:`A` is well-conditioned (its `condition number`_ is not too large), or you do not mind some precision loss.
- For a general matrix: `'gesvdj'` (Jacobi method)
- If :attr:`A` is tall or wide (`m >> n` or `m << n`): `'gesvda'` (Approximate method)
- If :attr:`A` is not well-conditioned or precision is relevant: `'gesvd'` (QR based)
By default (:attr:`driver`\ `= None`), we call `'gesvdj'` and, if it fails, we fall back to `'gesvd'`.
Differences with `numpy.linalg.svd`:
- Unlike `numpy.linalg.svd`, this function always returns a tuple of three tensors
and it doesn't support `compute_uv` argument.
Please use :func:`torch.linalg.svdvals`, which computes only the singular values,
instead of `compute_uv=False`.
.. note:: When :attr:`full_matrices`\ `= True`, the gradients with respect to `U[..., :, min(m, n):]`
and `Vh[..., min(m, n):, :]` will be ignored, as those vectors can be arbitrary bases
of the corresponding subspaces.
.. warning:: The returned tensors `U` and `V` are not unique, nor are they continuous with
respect to :attr:`A`.
Due to this lack of uniqueness, different hardware and software may compute
different singular vectors.
This non-uniqueness is caused by the fact that multiplying any pair of singular
vectors :math:`u_k, v_k` by `-1` in the real case or by
:math:`e^{i \phi}, \phi \in \mathbb{R}` in the complex case produces another two
valid singular vectors of the matrix.
For this reason, the loss function shall not depend on this :math:`e^{i \phi}` quantity,
as it is not well-defined.
This is checked for complex inputs when computing the gradients of this function. As such,
when inputs are complex and are on a CUDA device, the computation of the gradients
of this function synchronizes that device with the CPU.
.. warning:: Gradients computed using `U` or `Vh` will only be finite when
:attr:`A` does not have repeated singular values. If :attr:`A` is rectangular,
additionally, zero must also not be one of its singular values.
Furthermore, if the distance between any two singular values is close to zero,
the gradient will be numerically unstable, as it depends on the singular values
:math:`\sigma_i` through the computation of
:math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`.
In the rectangular case, the gradient will also be numerically unstable when
:attr:`A` has small singular values, as it also depends on the computation of
:math:`\frac{1}{\sigma_i}`.
.. seealso::
:func:`torch.linalg.svdvals` computes only the singular values.
Unlike :func:`torch.linalg.svd`, the gradients of :func:`~svdvals` are always
numerically stable.
:func:`torch.linalg.eig` for a function that computes another type of spectral
decomposition of a matrix. The eigendecomposition works just on square matrices.
:func:`torch.linalg.eigh` for a (faster) function that computes the eigenvalue decomposition
for Hermitian and symmetric matrices.
:func:`torch.linalg.qr` for another (much faster) decomposition that works on general
matrices.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
full_matrices (bool, optional): controls whether to compute the full or reduced
SVD, and consequently, the shape of the returned tensors `U` and `Vh`. Default: `True`.
Keyword args:
driver (str, optional): name of the cuSOLVER method to be used. This keyword argument only works on CUDA inputs.
Available options are: `None`, `gesvd`, `gesvdj`, and `gesvda`.
Default: `None`.
out (tuple, optional): output tuple of three tensors. Ignored if `None`.
Returns:
A named tuple `(U, S, Vh)` which corresponds to :math:`U`, :math:`S`, :math:`V^{\text{H}}` above.
`S` will always be real-valued, even when :attr:`A` is complex.
It will also be ordered in descending order.
`U` and `Vh` will have the same dtype as :attr:`A`. The left / right singular vectors will be given by
the columns of `U` and the rows of `Vh` respectively.
Examples::
>>> A = torch.randn(5, 3)
>>> U, S, Vh = torch.linalg.svd(A, full_matrices=False)
>>> U.shape, S.shape, Vh.shape
(torch.Size([5, 3]), torch.Size([3]), torch.Size([3, 3]))
>>> torch.dist(A, U @ torch.diag(S) @ Vh)
tensor(1.0486e-06)
>>> U, S, Vh = torch.linalg.svd(A)
>>> U.shape, S.shape, Vh.shape
(torch.Size([5, 5]), torch.Size([3]), torch.Size([3, 3]))
>>> torch.dist(A, U[:, :3] @ torch.diag(S) @ Vh)
tensor(1.0486e-06)
>>> A = torch.randn(7, 5, 3)
>>> U, S, Vh = torch.linalg.svd(A, full_matrices=False)
>>> torch.dist(A, U @ torch.diag_embed(S) @ Vh)
tensor(3.0957e-06)
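An illustrative check that the reduced factors have orthonormal columns, as stated above
(`True` holds up to numerical error)::
>>> A = torch.randn(5, 3, dtype=torch.float64)
>>> U, S, Vh = torch.linalg.svd(A, full_matrices=False)
>>> torch.allclose(U.mT @ U, torch.eye(3, dtype=torch.float64))
True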
.. _condition number:
https://pytorch.org/docs/master/linalg.html#torch.linalg.cond
.. _the resulting vectors will span the same subspace:
https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
""")
svdvals = _add_docstr(_linalg.linalg_svdvals, r"""
linalg.svdvals(A, *, driver=None, out=None) -> Tensor
Computes the singular values of a matrix.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The singular values are returned in descending order.
.. note:: This function is equivalent to NumPy's `linalg.svd(A, compute_uv=False)`.
""" + fr"""
.. note:: {common_notes["sync_note"]}
""" + r"""
.. seealso::
:func:`torch.linalg.svd` computes the full singular value decomposition.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
Keyword args:
driver (str, optional): name of the cuSOLVER method to be used. This keyword argument only works on CUDA inputs.
Available options are: `None`, `gesvd`, `gesvdj`, and `gesvda`.
Check :func:`torch.linalg.svd` for details.
Default: `None`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Returns:
A real-valued tensor, even when :attr:`A` is complex.
Examples::
>>> A = torch.randn(5, 3)
>>> S = torch.linalg.svdvals(A)
>>> S
tensor([2.5139, 2.1087, 1.1066])
>>> torch.dist(S, torch.linalg.svd(A, full_matrices=False).S)
tensor(2.4576e-07)
""")
cond = _add_docstr(_linalg.linalg_cond, r"""
linalg.cond(A, p=None, *, out=None) -> Tensor
Computes the condition number of a matrix with respect to a matrix norm.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **condition number** :math:`\kappa` of a matrix
:math:`A \in \mathbb{K}^{n \times n}` is defined as
.. math::
\kappa(A) = \|A\|_p\|A^{-1}\|_p
The condition number of :attr:`A` measures the numerical stability of the linear system `AX = B`
with respect to a matrix norm.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
:attr:`p` defines the matrix norm that is computed. The following norms are supported:
========= =================================
:attr:`p` matrix norm
========= =================================
`None` `2`-norm (largest singular value)
`'fro'` Frobenius norm
`'nuc'` nuclear norm
`inf` `max(sum(abs(x), dim=1))`
`-inf` `min(sum(abs(x), dim=1))`
`1` `max(sum(abs(x), dim=0))`
`-1` `min(sum(abs(x), dim=0))`
`2` largest singular value
`-2` smallest singular value
========= =================================
where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
When :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`, this function uses
:func:`torch.linalg.norm` and :func:`torch.linalg.inv`.
As such, in this case, the matrix (or every matrix in the batch) :attr:`A` has to be square
and invertible.
For :attr:`p` in `(2, -2)`, this function can be computed in terms of the singular values
:math:`\sigma_1 \geq \ldots \geq \sigma_n`
.. math::
\kappa_2(A) = \frac{\sigma_1}{\sigma_n}\qquad \kappa_{-2}(A) = \frac{\sigma_n}{\sigma_1}
In these cases, it is computed using :func:`torch.linalg.svdvals`. For these norms, the matrix
(or every matrix in the batch) :attr:`A` may have any shape.
.. note:: When inputs are on a CUDA device, this function synchronizes that device with the CPU
if :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`.
.. seealso::
:func:`torch.linalg.solve` for a function that solves linear systems of square matrices.
:func:`torch.linalg.lstsq` for a function that solves linear systems of general matrices.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions
for :attr:`p` in `(2, -2)`, and of shape `(*, n, n)` where every matrix
is invertible for :attr:`p` in `('fro', 'nuc', inf, -inf, 1, -1)`.
p (int, inf, -inf, 'fro', 'nuc', optional):
the type of the matrix norm to use in the computations (see above). Default: `None`
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Returns:
A real-valued tensor, even when :attr:`A` is complex.
Raises:
RuntimeError:
if :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`
and the :attr:`A` matrix or any matrix in the batch :attr:`A` is not square
or invertible.
Examples::
>>> A = torch.randn(3, 4, 4, dtype=torch.complex64)
>>> torch.linalg.cond(A)
>>> A = torch.tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]])
>>> torch.linalg.cond(A)
tensor([1.4142])
>>> torch.linalg.cond(A, 'fro')
tensor(3.1623)
>>> torch.linalg.cond(A, 'nuc')
tensor(9.2426)
>>> torch.linalg.cond(A, float('inf'))
tensor(2.)
>>> torch.linalg.cond(A, float('-inf'))
tensor(1.)
>>> torch.linalg.cond(A, 1)
tensor(2.)
>>> torch.linalg.cond(A, -1)
tensor(1.)
>>> torch.linalg.cond(A, 2)
tensor([1.4142])
>>> torch.linalg.cond(A, -2)
tensor([0.7071])
>>> A = torch.randn(2, 3, 3)
>>> torch.linalg.cond(A)
tensor([[9.5917],
[3.2538]])
>>> A = torch.randn(2, 3, 3, dtype=torch.complex64)
>>> torch.linalg.cond(A)
tensor([[4.6245],
[4.5671]])
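An illustrative check of the :math:`\kappa_2` formula above in terms of
:func:`torch.linalg.svdvals` (`True` holds up to numerical error)::
>>> A = torch.randn(4, 4, dtype=torch.float64)
>>> S = torch.linalg.svdvals(A)
>>> torch.allclose(torch.linalg.cond(A), S[0] / S[-1])
True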
""")
pinv = _add_docstr(_linalg.linalg_pinv, r"""
linalg.pinv(A, *, atol=None, rtol=None, hermitian=False, out=None) -> Tensor
Computes the pseudoinverse (Moore-Penrose inverse) of a matrix.
The pseudoinverse may be `defined algebraically`_
but it is more computationally convenient to understand it `through the SVD`_.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
If :attr:`hermitian`\ `= True`, :attr:`A` is assumed to be Hermitian if complex or
symmetric if real, but this is not checked internally. Instead, just the lower
triangular part of the matrix is used in the computations.
The singular values (or the norm of the eigenvalues when :attr:`hermitian`\ `= True`)
that are below :math:`\max(\text{atol}, \sigma_1 \cdot \text{rtol})` threshold are
treated as zero and discarded in the computation,
where :math:`\sigma_1` is the largest singular value (or eigenvalue).
If :attr:`rtol` is not specified and :attr:`A` is a matrix of dimensions `(m, n)`,
the relative tolerance is set to be :math:`\text{rtol} = \max(m, n) \varepsilon`
where :math:`\varepsilon` is the epsilon value for the dtype of :attr:`A` (see :class:`.finfo`).
If :attr:`rtol` is not specified and :attr:`atol` is specified to be larger than zero then
:attr:`rtol` is set to zero.
If :attr:`atol` or :attr:`rtol` is a :class:`torch.Tensor`, its shape must be broadcastable to that
of the singular values of :attr:`A` as returned by :func:`torch.linalg.svd`.
.. note:: This function uses :func:`torch.linalg.svd` if :attr:`hermitian`\ `= False` and
:func:`torch.linalg.eigh` if :attr:`hermitian`\ `= True`.
For CUDA inputs, this function synchronizes that device with the CPU.
.. note::
Consider using :func:`torch.linalg.lstsq` if possible for multiplying a matrix on the left by
the pseudoinverse, as::
torch.linalg.lstsq(A, B).solution == A.pinv() @ B
It is always preferred to use :func:`~lstsq` when possible, as it is faster and more
numerically stable than computing the pseudoinverse explicitly.
.. note::
This function has a NumPy compatible variant `linalg.pinv(A, rcond, hermitian=False)`.
However, use of the positional argument :attr:`rcond` is deprecated in favor of :attr:`rtol`.
.. warning::
This function uses internally :func:`torch.linalg.svd` (or :func:`torch.linalg.eigh`
when :attr:`hermitian`\ `= True`), so its derivative has the same problems as those of these
functions. See the warnings in :func:`torch.linalg.svd` and :func:`torch.linalg.eigh` for
more details.
.. seealso::
:func:`torch.linalg.inv` computes the inverse of a square matrix.
:func:`torch.linalg.lstsq` computes :attr:`A`\ `.pinv() @ \ `:attr:`B` with a
numerically stable algorithm.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
rcond (float, Tensor, optional): [NumPy Compat]. Alias for :attr:`rtol`. Default: `None`.
Keyword args:
atol (float, Tensor, optional): the absolute tolerance value. When `None` it's considered to be zero.
Default: `None`.
rtol (float, Tensor, optional): the relative tolerance value. See above for the value it takes when `None`.
Default: `None`.
hermitian (bool, optional): indicates whether :attr:`A` is Hermitian if complex
or symmetric if real. Default: `False`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(3, 5)
>>> A
tensor([[ 0.5495, 0.0979, -1.4092, -0.1128, 0.4132],
[-1.1143, -0.3662, 0.3042, 1.6374, -0.9294],
[-0.3269, -0.5745, -0.0382, -0.5922, -0.6759]])
>>> torch.linalg.pinv(A)
tensor([[ 0.0600, -0.1933, -0.2090],
[-0.0903, -0.0817, -0.4752],
[-0.7124, -0.1631, -0.2272],
[ 0.1356, 0.3933, -0.5023],
[-0.0308, -0.1725, -0.5216]])
>>> A = torch.randn(2, 6, 3)
>>> Apinv = torch.linalg.pinv(A)
>>> torch.dist(Apinv @ A, torch.eye(3))
tensor(8.5633e-07)
>>> A = torch.randn(3, 3, dtype=torch.complex64)
>>> A = A + A.T.conj() # creates a Hermitian matrix
>>> Apinv = torch.linalg.pinv(A, hermitian=True)
>>> torch.dist(Apinv @ A, torch.eye(3))
tensor(1.0830e-06)
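An illustrative check of one of the defining Moore-Penrose identities, :math:`A A^+ A = A`
(`True` holds up to numerical error)::
>>> A = torch.randn(3, 5, dtype=torch.float64)
>>> Apinv = torch.linalg.pinv(A)
>>> torch.allclose(A @ Apinv @ A, A)
True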
.. _defined algebraically:
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse#Existence_and_uniqueness
.. _through the SVD:
https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse#Singular_value_decomposition_(SVD)
""")
matrix_exp = _add_docstr(_linalg.linalg_matrix_exp, r"""
linalg.matrix_exp(A) -> Tensor
Computes the matrix exponential of a square matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
this function computes the **matrix exponential** of :math:`A \in \mathbb{K}^{n \times n}`, which is defined as
.. math::
\mathrm{matrix\_exp}(A) = \sum_{k=0}^\infty \frac{1}{k!}A^k \in \mathbb{K}^{n \times n}.
If the matrix :math:`A` has eigenvalues :math:`\lambda_i \in \mathbb{C}`,
the matrix :math:`\mathrm{matrix\_exp}(A)` has eigenvalues :math:`e^{\lambda_i} \in \mathbb{C}`.
Supports input of bfloat16, float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
Example::
>>> A = torch.empty(2, 2, 2)
>>> A[0, :, :] = torch.eye(2, 2)
>>> A[1, :, :] = 2 * torch.eye(2, 2)
>>> A
tensor([[[1., 0.],
[0., 1.]],
[[2., 0.],
[0., 2.]]])
>>> torch.linalg.matrix_exp(A)
tensor([[[2.7183, 0.0000],
[0.0000, 2.7183]],
[[7.3891, 0.0000],
[0.0000, 7.3891]]])
>>> import math
>>> A = torch.tensor([[0, math.pi/3], [-math.pi/3, 0]]) # A is skew-symmetric
>>> torch.linalg.matrix_exp(A) # matrix_exp(A) = [[cos(pi/3), sin(pi/3)], [-sin(pi/3), cos(pi/3)]]
tensor([[ 0.5000, 0.8660],
[-0.8660, 0.5000]])
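An illustrative check of the eigenvalue relation above for a symmetric matrix, using
:func:`torch.linalg.eigh` (`True` holds up to numerical error)::
>>> A = torch.randn(3, 3, dtype=torch.float64)
>>> A = A + A.mT # symmetric, so A = Q diag(evals) Q^T
>>> evals, Q = torch.linalg.eigh(A)
>>> torch.allclose(torch.linalg.matrix_exp(A), Q @ torch.diag(evals.exp()) @ Q.mT)
True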
""")
solve = _add_docstr(_linalg.linalg_solve, r"""
linalg.solve(A, B, *, left=True, out=None) -> Tensor
Computes the solution of a square system of linear equations with a unique solution.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
this function computes the solution :math:`X \in \mathbb{K}^{n \times k}` of the **linear system** associated to
:math:`A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{n \times k}`, which is defined as
.. math:: AX = B
If :attr:`left`\ `= False`, this function returns the matrix :math:`X \in \mathbb{K}^{n \times k}` that solves the system
.. math::
XA = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.}
This system of linear equations has one solution if and only if :math:`A` is `invertible`_.
This function assumes that :math:`A` is invertible.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
Letting `*` be zero or more batch dimensions,
- If :attr:`A` has shape `(*, n, n)` and :attr:`B` has shape `(*, n)` (a batch of vectors) or shape
`(*, n, k)` (a batch of matrices or "multiple right-hand sides"), this function returns `X` of shape
`(*, n)` or `(*, n, k)` respectively.
- Otherwise, if :attr:`A` has shape `(*, n, n)` and :attr:`B` has shape `(n,)` or `(n, k)`, :attr:`B`
is broadcasted to have shape `(*, n)` or `(*, n, k)` respectively.
This function then returns the solution of the resulting batch of systems of linear equations.
.. note::
This function computes `X = \ `:attr:`A`\ `.inverse() @ \ `:attr:`B` in a faster and
more numerically stable way than performing the computations separately.
.. note::
It is possible to compute the solution of the system :math:`XA = B` by passing the inputs
:attr:`A` and :attr:`B` transposed and transposing the output returned by this function.
""" + fr"""
.. note:: {common_notes["sync_note"]}
""" + r"""
.. seealso::
:func:`torch.linalg.solve_triangular` computes the solution of a triangular system of linear
equations with a unique solution.
Args:
A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
B (Tensor): right-hand side tensor of shape `(*, n)` or `(*, n, k)` or `(n,)` or `(n, k)`
according to the rules described above
Keyword args:
left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if the :attr:`A` matrix is not invertible or any matrix in a batched :attr:`A`
is not invertible.
Examples::
>>> A = torch.randn(3, 3)
>>> b = torch.randn(3)
>>> x = torch.linalg.solve(A, b)
>>> torch.allclose(A @ x, b)
True
>>> A = torch.randn(2, 3, 3)
>>> B = torch.randn(2, 3, 4)
>>> X = torch.linalg.solve(A, B)
>>> X.shape
torch.Size([2, 3, 4])
>>> torch.allclose(A @ X, B)
True
>>> A = torch.randn(2, 3, 3)
>>> b = torch.randn(3, 1)
>>> x = torch.linalg.solve(A, b) # b is broadcasted to size (2, 3, 1)
>>> x.shape
torch.Size([2, 3, 1])
>>> torch.allclose(A @ x, b)
True
>>> b = torch.randn(3)
>>> x = torch.linalg.solve(A, b) # b is broadcasted to size (2, 3)
>>> x.shape
torch.Size([2, 3])
>>> Ax = A @ x.unsqueeze(-1)
>>> torch.allclose(Ax, b.unsqueeze(-1).expand_as(Ax))
True
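An illustrative sketch of :attr:`left`\ `= False`, which solves :math:`XA = B` as described
above (`True` holds up to numerical error)::
>>> A = torch.randn(3, 3, dtype=torch.float64)
>>> B = torch.randn(2, 3, dtype=torch.float64)
>>> X = torch.linalg.solve(A, B, left=False)
>>> torch.allclose(X @ A, B)
True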
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
""")
solve_triangular = _add_docstr(_linalg.linalg_solve_triangular, r"""
linalg.solve_triangular(A, B, *, upper, left=True, unitriangular=False, out=None) -> Tensor
Computes the solution of a triangular system of linear equations with a unique solution.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
this function computes the solution :math:`X \in \mathbb{K}^{n \times k}` of the **linear system**
associated to the triangular matrix :math:`A \in \mathbb{K}^{n \times n}` without zeros on the diagonal
(that is, it is `invertible`_) and the rectangular matrix :math:`B \in \mathbb{K}^{n \times k}`,
which is defined as
.. math:: AX = B
The argument :attr:`upper` signals whether :math:`A` is upper or lower triangular.
If :attr:`left`\ `= False`, this function returns the matrix :math:`X \in \mathbb{K}^{n \times k}` that
solves the system
.. math::
XA = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.}
If :attr:`upper`\ `= True` (resp. `False`) just the upper (resp. lower) triangular half of :attr:`A`
will be accessed. The elements below (resp. above) the main diagonal will be considered to be zero and will not be accessed.
If :attr:`unitriangular`\ `= True`, the diagonal of :attr:`A` is assumed to be ones and will not be accessed.
The result may contain `NaN` s if the diagonal of :attr:`A` contains zeros or elements that
are very close to zero and :attr:`unitriangular`\ `= False` (default) or if the input matrix
has very small eigenvalues.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
.. seealso::
:func:`torch.linalg.solve` computes the solution of a general square system of linear
equations with a unique solution.
Args:
A (Tensor): tensor of shape `(*, n, n)` (or `(*, k, k)` if :attr:`left`\ `= False`)
where `*` is zero or more batch dimensions.
B (Tensor): right-hand side tensor of shape `(*, n, k)`.
Keyword args:
upper (bool): whether :attr:`A` is an upper or lower triangular matrix.
left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
unitriangular (bool, optional): if `True`, the diagonal elements of :attr:`A` are assumed to be
all equal to `1`. Default: `False`.
out (Tensor, optional): output tensor. `B` may be passed as `out` and the result is computed in-place on `B`.
Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(3, 3).triu_()
>>> B = torch.randn(3, 4)
>>> X = torch.linalg.solve_triangular(A, B, upper=True)
>>> torch.allclose(A @ X, B)
True
>>> A = torch.randn(2, 3, 3).tril_()
>>> B = torch.randn(2, 3, 4)
>>> X = torch.linalg.solve_triangular(A, B, upper=False)
>>> torch.allclose(A @ X, B)
True
>>> A = torch.randn(2, 4, 4).tril_()
>>> B = torch.randn(2, 3, 4)
>>> X = torch.linalg.solve_triangular(A, B, upper=False, left=False)
>>> torch.allclose(X @ A, B)
True
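An illustrative sketch of :attr:`unitriangular`\ `= True`: the diagonal of :attr:`A` is ignored
and taken to be ones (`True` holds up to numerical error)::
>>> A = torch.randn(3, 3, dtype=torch.float64).tril_()
>>> B = torch.randn(3, 2, dtype=torch.float64)
>>> X = torch.linalg.solve_triangular(A, B, upper=False, unitriangular=True)
>>> L1 = A.tril(-1) + torch.eye(3, dtype=torch.float64) # unit lower-triangular version of A
>>> torch.allclose(L1 @ X, B)
True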
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
""")
lu_factor = _add_docstr(_linalg.linalg_lu_factor, r"""
linalg.lu_factor(A, *, pivot=True, out=None) -> (Tensor, Tensor)
Computes a compact representation of the LU factorization with partial pivoting of a matrix.
This function computes a compact representation of the decomposition given by :func:`torch.linalg.lu`.
If the matrix is square, this representation may be used in :func:`torch.linalg.lu_solve`
to solve systems of linear equations that share the matrix :attr:`A`.
The returned decomposition is represented as a named tuple `(LU, pivots)`.
The ``LU`` matrix has the same shape as the input matrix ``A``. Its upper and lower triangular
parts encode the non-constant elements of ``L`` and ``U`` of the LU decomposition of ``A``.
The returned permutation matrix is represented by a 1-indexed vector. `pivots[i] == j` represents
that in the `i`-th step of the algorithm, the `i`-th row was permuted with the `j-1`-th row.
On CUDA, one may use :attr:`pivot`\ `= False`. In this case, this function returns the LU
decomposition without pivoting if it exists.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
""" + fr"""
.. note:: {common_notes["sync_note_has_ex"].format("torch.linalg.lu_factor_ex")}
""" + r"""
.. warning:: The LU decomposition is almost never unique, as often there are different permutation
matrices that can yield different LU decompositions.
As such, different platforms, like SciPy, or inputs on different devices,
may produce different valid decompositions.
Gradient computations are only supported if the input matrix is full-rank.
If this condition is not met, no error will be thrown, but the gradient may not be finite.
This is because the LU decomposition with pivoting is not differentiable at these points.
.. seealso::
:func:`torch.linalg.lu_solve` solves a system of linear equations given the output of this
function provided the input matrix was square and invertible.
:func:`torch.lu_unpack` unpacks the tensors returned by :func:`~lu_factor` into the three
matrices `P, L, U` that form the decomposition.
:func:`torch.linalg.lu` computes the LU decomposition with partial pivoting of a possibly
non-square matrix. It is a composition of :func:`~lu_factor` and :func:`torch.lu_unpack`.
:func:`torch.linalg.solve` solves a system of linear equations. It is a composition
of :func:`~lu_factor` and :func:`~lu_solve`.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
Keyword args:
pivot (bool, optional): Whether to compute the LU decomposition with partial pivoting, or the regular LU
decomposition. :attr:`pivot`\ `= False` is not supported on CPU. Default: `True`.
out (tuple, optional): tuple of two tensors to write the output to. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(LU, pivots)`.
Raises:
RuntimeError: if the :attr:`A` matrix is not invertible or any matrix in a batched :attr:`A`
is not invertible.
Examples::
>>> A = torch.randn(2, 3, 3)
>>> B1 = torch.randn(2, 3, 4)
>>> B2 = torch.randn(2, 3, 7)
>>> A_factor = torch.linalg.lu_factor(A)
>>> X1 = torch.linalg.lu_solve(A_factor, B1)
>>> X2 = torch.linalg.lu_solve(A_factor, B2)
>>> torch.allclose(A @ X1, B1)
True
>>> torch.allclose(A @ X2, B2)
True
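An illustrative check, via :func:`torch.lu_unpack`, that the compact representation encodes
`P, L, U` with :math:`PLU = A` (`True` holds up to numerical error)::
>>> A = torch.randn(3, 3, dtype=torch.float64)
>>> LU, pivots = torch.linalg.lu_factor(A)
>>> P, L, U = torch.lu_unpack(LU, pivots)
>>> torch.allclose(P @ L @ U, A)
True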
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
""")
lu_factor_ex = _add_docstr(_linalg.linalg_lu_factor_ex, r"""
linalg.lu_factor_ex(A, *, pivot=True, check_errors=False, out=None) -> (Tensor, Tensor, Tensor)
This is a version of :func:`~lu_factor` that does not perform error checks unless :attr:`check_errors`\ `= True`.
It also returns the :attr:`info` tensor returned by `LAPACK's getrf`_.
""" + fr"""
.. note:: {common_notes["sync_note_ex"]}
.. warning:: {common_notes["experimental_warning"]}
""" + r"""
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
Keyword args:
pivot (bool, optional): Whether to compute the LU decomposition with partial pivoting, or the regular LU
decomposition. :attr:`pivot`\ `= False` not supported on CPU. Default: `True`.
check_errors (bool, optional): controls whether to check the content of ``infos`` and raise
an error if it is non-zero. Default: `False`.
out (tuple, optional): tuple of three tensors to write the output to. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(LU, pivots, info)`.
.. _LAPACK's getrf:
https://www.netlib.org/lapack/explore-html/dd/d9a/group__double_g_ecomputational_ga0019443faea08275ca60a734d0593e60.html
""")
lu_solve = _add_docstr(_linalg.linalg_lu_solve, r"""
linalg.lu_solve(LU, pivots, B, *, left=True, adjoint=False, out=None) -> Tensor
Computes the solution of a square system of linear equations with a unique solution given an LU decomposition.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
this function computes the solution :math:`X \in \mathbb{K}^{n \times k}` of the **linear system** associated to
:math:`A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{n \times k}`, which is defined as
.. math:: AX = B
where :math:`A` is given factorized as returned by :func:`~lu_factor`.
If :attr:`left`\ `= False`, this function returns the matrix :math:`X \in \mathbb{K}^{n \times k}` that solves the system
.. math::
XA = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.}
If :attr:`adjoint`\ `= True` (and :attr:`left`\ `= True`), given an LU factorization of :math:`A`
this function returns the :math:`X \in \mathbb{K}^{n \times k}` that solves the system
.. math::
A^{\text{H}}X = B\mathrlap{\qquad A \in \mathbb{K}^{k \times k}, B \in \mathbb{K}^{n \times k}.}
where :math:`A^{\text{H}}` is the conjugate transpose when :math:`A` is complex, and the
transpose when :math:`A` is real-valued. The :attr:`left`\ `= False` case is analogous.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
Args:
LU (Tensor): tensor of shape `(*, n, n)` (or `(*, k, k)` if :attr:`left`\ `= False`)
where `*` is zero or more batch dimensions as returned by :func:`~lu_factor`.
pivots (Tensor): tensor of shape `(*, n)` (or `(*, k)` if :attr:`left`\ `= False`)
where `*` is zero or more batch dimensions as returned by :func:`~lu_factor`.
B (Tensor): right-hand side tensor of shape `(*, n, k)`.
Keyword args:
left (bool, optional): whether to solve the system :math:`AX=B` or :math:`XA = B`. Default: `True`.
adjoint (bool, optional): whether to solve the system :math:`AX=B` or :math:`A^{\text{H}}X = B`. Default: `False`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> A = torch.randn(3, 3)
>>> LU, pivots = torch.linalg.lu_factor(A)
>>> B = torch.randn(3, 2)
>>> X = torch.linalg.lu_solve(LU, pivots, B)
>>> torch.allclose(A @ X, B)
True
>>> B = torch.randn(3, 3, 2) # Broadcasting rules apply: A is broadcasted
>>> X = torch.linalg.lu_solve(LU, pivots, B)
>>> torch.allclose(A @ X, B)
True
>>> B = torch.randn(3, 5, 3)
>>> X = torch.linalg.lu_solve(LU, pivots, B, left=False)
>>> torch.allclose(X @ A, B)
True
>>> B = torch.randn(3, 3, 4) # Now solve for A^T
>>> X = torch.linalg.lu_solve(LU, pivots, B, adjoint=True)
>>> torch.allclose(A.mT @ X, B)
True
.. _invertible:
https://en.wikipedia.org/wiki/Invertible_matrix#The_invertible_matrix_theorem
""")
lu = _add_docstr(_linalg.linalg_lu, r"""
lu(A, *, pivot=True, out=None) -> (Tensor, Tensor, Tensor)
Computes the LU decomposition with partial pivoting of a matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **LU decomposition with partial pivoting** of a matrix
:math:`A \in \mathbb{K}^{m \times n}` is defined as
.. math::
A = PLU\mathrlap{\qquad P \in \mathbb{K}^{m \times m}, L \in \mathbb{K}^{m \times k}, U \in \mathbb{K}^{k \times n}}
where `k = min(m,n)`, :math:`P` is a `permutation matrix`_, :math:`L` is lower triangular with ones on the diagonal
and :math:`U` is upper triangular.
If :attr:`pivot`\ `= False` and :attr:`A` is on GPU, then the **LU decomposition without pivoting** is computed
.. math::
A = LU\mathrlap{\qquad L \in \mathbb{K}^{m \times k}, U \in \mathbb{K}^{k \times n}}
When :attr:`pivot`\ `= False`, the returned matrix :attr:`P` will be empty.
The LU decomposition without pivoting `may not exist`_ if any of the principal minors of :attr:`A` is singular.
In this case, the output matrix may contain `inf` or `NaN`.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
.. seealso::
:func:`torch.linalg.solve` solves a system of linear equations using the LU decomposition
with partial pivoting.
.. warning:: The LU decomposition is almost never unique, as often there are different permutation
matrices that can yield different LU decompositions.
As such, different platforms, like SciPy, or inputs on different devices,
may produce different valid decompositions.
.. warning:: Gradient computations are only supported if the input matrix is full-rank.
If this condition is not met, no error will be thrown, but the gradient
may not be finite.
This is because the LU decomposition with pivoting is not differentiable at these points.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
pivot (bool, optional): Controls whether to compute the LU decomposition with partial pivoting or
no pivoting. Default: `True`.
Keyword args:
out (tuple, optional): output tuple of three tensors. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(P, L, U)`.
Examples::
>>> A = torch.randn(3, 2)
>>> P, L, U = torch.linalg.lu(A)
>>> P
tensor([[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]])
>>> L
tensor([[1.0000, 0.0000],
[0.5007, 1.0000],
[0.0633, 0.9755]])
>>> U
tensor([[0.3771, 0.0489],
[0.0000, 0.9644]])
>>> torch.dist(A, P @ L @ U)
tensor(5.9605e-08)
>>> A = torch.randn(2, 5, 7, device="cuda")
>>> P, L, U = torch.linalg.lu(A, pivot=False)
>>> P
tensor([], device='cuda:0')
>>> torch.dist(A, L @ U)
tensor(1.0376e-06, device='cuda:0')
.. _permutation matrix:
https://en.wikipedia.org/wiki/Permutation_matrix
.. _may not exist:
https://en.wikipedia.org/wiki/LU_decomposition#Definitions
""")
tensorinv = _add_docstr(_linalg.linalg_tensorinv, r"""
linalg.tensorinv(A, ind=2, *, out=None) -> Tensor
Computes the multiplicative inverse of :func:`torch.tensordot`.
If `m` is the product of the first :attr:`ind` dimensions of :attr:`A` and `n` is the product of
the rest of the dimensions, this function expects `m` and `n` to be equal.
If this is the case, it computes a tensor `X` such that
`tensordot(\ `:attr:`A`\ `, X, \ `:attr:`ind`\ `)` is the identity matrix in dimension `m`.
`X` will have the shape of :attr:`A` but with the first :attr:`ind` dimensions pushed back to the end
.. code:: text
X.shape == A.shape[ind:] + A.shape[:ind]
Supports input of float, double, cfloat and cdouble dtypes.
.. note:: When :attr:`A` is a `2`-dimensional tensor and :attr:`ind`\ `= 1`,
this function computes the (multiplicative) inverse of :attr:`A`
(see :func:`torch.linalg.inv`).
.. note::
Consider using :func:`torch.linalg.tensorsolve` if possible for multiplying a tensor on the left
by the tensor inverse, as::
linalg.tensorsolve(A, B) == torch.tensordot(linalg.tensorinv(A), B) # When B is a tensor with shape A.shape[:B.ndim]
It is always preferred to use :func:`~tensorsolve` when possible, as it is faster and more
numerically stable than computing the pseudoinverse explicitly.
.. seealso::
:func:`torch.linalg.tensorsolve` computes
`torch.tensordot(tensorinv(\ `:attr:`A`\ `), \ `:attr:`B`\ `)`.
Args:
A (Tensor): tensor to invert. Its shape must satisfy
`prod(\ `:attr:`A`\ `.shape[:\ `:attr:`ind`\ `]) ==
prod(\ `:attr:`A`\ `.shape[\ `:attr:`ind`\ `:])`.
ind (int): index at which to compute the inverse of :func:`torch.tensordot`. Default: `2`.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if the reshaped :attr:`A` is not invertible or the product of the first
:attr:`ind` dimensions is not equal to the product of the rest.
Examples::
>>> A = torch.eye(4 * 6).reshape((4, 6, 8, 3))
>>> Ainv = torch.linalg.tensorinv(A, ind=2)
>>> Ainv.shape
torch.Size([8, 3, 4, 6])
>>> B = torch.randn(4, 6)
>>> torch.allclose(torch.tensordot(Ainv, B), torch.linalg.tensorsolve(A, B))
True
>>> A = torch.randn(4, 4)
>>> Atensorinv = torch.linalg.tensorinv(A, ind=1)
>>> Ainv = torch.linalg.inv(A)
>>> torch.allclose(Atensorinv, Ainv)
True
""")
tensorsolve = _add_docstr(_linalg.linalg_tensorsolve, r"""
linalg.tensorsolve(A, B, dims=None, *, out=None) -> Tensor
Computes the solution `X` to the system `torch.tensordot(A, X) = B`.
If `m` is the product of the first :attr:`B`\ `.ndim` dimensions of :attr:`A` and
`n` is the product of the rest of the dimensions, this function expects `m` and `n` to be equal.
The returned tensor `x` satisfies
`tensordot(\ `:attr:`A`\ `, x, dims=x.ndim) == \ `:attr:`B`.
`x` has shape :attr:`A`\ `.shape[B.ndim:]`.
If :attr:`dims` is specified, :attr:`A` will be reshaped as
.. code:: text
A = movedim(A, dims, range(len(dims) - A.ndim + 1, 0))
Supports inputs of float, double, cfloat and cdouble dtypes.
.. seealso::
:func:`torch.linalg.tensorinv` computes the multiplicative inverse of
:func:`torch.tensordot`.
Args:
A (Tensor): tensor to solve for. Its shape must satisfy
`prod(\ `:attr:`A`\ `.shape[:\ `:attr:`B`\ `.ndim]) ==
prod(\ `:attr:`A`\ `.shape[\ `:attr:`B`\ `.ndim:])`.
B (Tensor): tensor of shape :attr:`A`\ `.shape[:\ `:attr:`B`\ `.ndim]`.
dims (Tuple[int], optional): dimensions of :attr:`A` to be moved.
If `None`, no dimensions are moved. Default: `None`.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Raises:
RuntimeError: if the reshaped :attr:`A`\ `.view(m, m)` with `m` as above is not
invertible or the product of the first :attr:`B`\ `.ndim` dimensions is not equal
to the product of the rest of the dimensions.
Examples::
>>> A = torch.eye(2 * 3 * 4).reshape((2 * 3, 4, 2, 3, 4))
>>> B = torch.randn(2 * 3, 4)
>>> X = torch.linalg.tensorsolve(A, B)
>>> X.shape
torch.Size([2, 3, 4])
>>> torch.allclose(torch.tensordot(A, X, dims=X.ndim), B)
True
>>> A = torch.randn(6, 4, 4, 3, 2)
>>> B = torch.randn(4, 3, 2)
>>> X = torch.linalg.tensorsolve(A, B, dims=(0, 2))
>>> X.shape
torch.Size([6, 4])
>>> A = A.permute(1, 3, 4, 0, 2)
>>> A.shape[B.ndim:]
torch.Size([6, 4])
>>> torch.allclose(torch.tensordot(A, X, dims=X.ndim), B, atol=1e-6)
True
""")
qr = _add_docstr(_linalg.linalg_qr, r"""
qr(A, mode='reduced', *, out=None) -> (Tensor, Tensor)
Computes the QR decomposition of a matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **full QR decomposition** of a matrix
:math:`A \in \mathbb{K}^{m \times n}` is defined as
.. math::
A = QR\mathrlap{\qquad Q \in \mathbb{K}^{m \times m}, R \in \mathbb{K}^{m \times n}}
where :math:`Q` is orthogonal in the real case and unitary in the complex case,
and :math:`R` is upper triangular with real diagonal (even in the complex case).
When `m > n` (tall matrix), as `R` is upper triangular, its last `m - n` rows are zero.
In this case, we can drop the last `m - n` columns of `Q` to form the
**reduced QR decomposition**:
.. math::
A = QR\mathrlap{\qquad Q \in \mathbb{K}^{m \times n}, R \in \mathbb{K}^{n \times n}}
The reduced QR decomposition agrees with the full QR decomposition when `n >= m` (wide matrix).
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The parameter :attr:`mode` chooses between the full and reduced QR decomposition.
If :attr:`A` has shape `(*, m, n)`, denoting `k = min(m, n)`
- :attr:`mode`\ `= 'reduced'` (default): Returns `(Q, R)` of shapes `(*, m, k)`, `(*, k, n)` respectively.
It is always differentiable.
- :attr:`mode`\ `= 'complete'`: Returns `(Q, R)` of shapes `(*, m, m)`, `(*, m, n)` respectively.
It is differentiable for `m <= n`.
- :attr:`mode`\ `= 'r'`: Computes only the reduced `R`. Returns `(Q, R)` with `Q` empty and `R` of shape `(*, k, n)`.
It is never differentiable.
Differences with `numpy.linalg.qr`:
- :attr:`mode`\ `= 'raw'` is not implemented.
- Unlike `numpy.linalg.qr`, this function always returns a tuple of two tensors.
When :attr:`mode`\ `= 'r'`, the `Q` tensor is an empty tensor.
.. warning:: The elements in the diagonal of `R` are not necessarily positive.
As such, the returned QR decomposition is only unique up to the sign of the diagonal of `R`.
Therefore, different platforms, like NumPy, or inputs on different devices,
may produce different valid decompositions.
.. warning:: The QR decomposition is only well-defined if the first `k = min(m, n)` columns
of every matrix in :attr:`A` are linearly independent.
If this condition is not met, no error will be thrown, but the QR produced
may be incorrect and its autodiff may fail or produce incorrect results.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
mode (str, optional): one of `'reduced'`, `'complete'`, `'r'`.
Controls the shape of the returned tensors. Default: `'reduced'`.
Keyword args:
out (tuple, optional): output tuple of two tensors. Ignored if `None`. Default: `None`.
Returns:
A named tuple `(Q, R)`.
Examples::
>>> A = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> Q, R = torch.linalg.qr(A)
>>> Q
tensor([[-0.8571, 0.3943, 0.3314],
[-0.4286, -0.9029, -0.0343],
[ 0.2857, -0.1714, 0.9429]])
>>> R
tensor([[ -14.0000, -21.0000, 14.0000],
[ 0.0000, -175.0000, 70.0000],
[ 0.0000, 0.0000, -35.0000]])
>>> (Q @ R).round()
tensor([[ 12., -51., 4.],
[ 6., 167., -68.],
[ -4., 24., -41.]])
>>> (Q.T @ Q).round()
tensor([[ 1., 0., 0.],
[ 0., 1., -0.],
[ 0., -0., 1.]])
>>> Q2, R2 = torch.linalg.qr(A, mode='r')
>>> Q2
tensor([])
>>> torch.equal(R, R2)
True
>>> A = torch.randn(3, 4, 5)
>>> Q, R = torch.linalg.qr(A, mode='complete')
>>> torch.dist(Q @ R, A)
tensor(1.6099e-06)
>>> torch.dist(Q.mT @ Q, torch.eye(4))
tensor(6.2158e-07)
""")
vander = _add_docstr(_linalg.linalg_vander, r"""
vander(x, N=None) -> Tensor
Generates a Vandermonde matrix.
Returns the Vandermonde matrix :math:`V`
.. math::
V = \begin{pmatrix}
1 & x_1 & x_1^2 & \dots & x_1^{N-1}\\
1 & x_2 & x_2^2 & \dots & x_2^{N-1}\\
1 & x_3 & x_3^2 & \dots & x_3^{N-1}\\
\vdots & \vdots & \vdots & \ddots &\vdots \\
1 & x_n & x_n^2 & \dots & x_n^{N-1}
\end{pmatrix}.
for `N > 1`.
If :attr:`N`\ `= None`, then `N = x.size(-1)` so that the output is a square matrix.
Supports inputs of float, double, cfloat, cdouble, and integral dtypes.
Also supports batches of vectors, and if :attr:`x` is a batch of vectors then
the output has the same batch dimensions.
Differences with `numpy.vander`:
- Unlike `numpy.vander`, this function returns the powers of :attr:`x` in ascending order.
To get them in the reverse order call ``linalg.vander(x, N).flip(-1)``.
Args:
x (Tensor): tensor of shape `(*, n)` where `*` is zero or more batch dimensions
consisting of vectors.
Keyword args:
N (int, optional): Number of columns in the output. Default: `x.size(-1)`.
Example::
>>> x = torch.tensor([1, 2, 3, 5])
>>> linalg.vander(x)
tensor([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
>>> linalg.vander(x, N=3)
tensor([[ 1, 1, 1],
[ 1, 2, 4],
[ 1, 3, 9],
[ 1, 5, 25]])
""")
vecdot = _add_docstr(_linalg.linalg_vecdot, r"""
linalg.vecdot(x, y, *, dim=-1, out=None) -> Tensor
Computes the dot product of two batches of vectors along a dimension.
In symbols, this function computes
.. math::
\sum_{i=1}^n \overline{x_i}y_i.
over the dimension :attr:`dim` where :math:`\overline{x_i}` denotes the conjugate for complex
vectors, and it is the identity for real vectors.
Supports input of half, bfloat16, float, double, cfloat, cdouble and integral dtypes.
It also supports broadcasting.
Args:
x (Tensor): first batch of vectors of shape `(*, n)`.
y (Tensor): second batch of vectors of shape `(*, n)`.
Keyword args:
dim (int): Dimension along which to compute the dot product. Default: `-1`.
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Examples::
>>> v1 = torch.randn(3, 2)
>>> v2 = torch.randn(3, 2)
>>> linalg.vecdot(v1, v2)
tensor([ 0.3223, 0.2815, -0.1944])
>>> torch.vdot(v1[0], v2[0])
tensor(0.3223)
""")
|
pytorch-master
|
torch/linalg/__init__.py
|
from typing import TypeVar, Union, Tuple, Optional
from .. import Tensor
# Create some useful type aliases
# Template for arguments which can be supplied as a tuple, or which can be a scalar which PyTorch will internally
# broadcast to a tuple.
# Comes in several variants: A tuple of unknown size, and a fixed-size tuple for 1d, 2d, or 3d operations.
T = TypeVar('T')
_scalar_or_tuple_any_t = Union[T, Tuple[T, ...]]
_scalar_or_tuple_1_t = Union[T, Tuple[T]]
_scalar_or_tuple_2_t = Union[T, Tuple[T, T]]
_scalar_or_tuple_3_t = Union[T, Tuple[T, T, T]]
_scalar_or_tuple_4_t = Union[T, Tuple[T, T, T, T]]
_scalar_or_tuple_5_t = Union[T, Tuple[T, T, T, T, T]]
_scalar_or_tuple_6_t = Union[T, Tuple[T, T, T, T, T, T]]
# For arguments which represent size parameters (eg, kernel size, padding)
_size_any_t = _scalar_or_tuple_any_t[int]
_size_1_t = _scalar_or_tuple_1_t[int]
_size_2_t = _scalar_or_tuple_2_t[int]
_size_3_t = _scalar_or_tuple_3_t[int]
_size_4_t = _scalar_or_tuple_4_t[int]
_size_5_t = _scalar_or_tuple_5_t[int]
_size_6_t = _scalar_or_tuple_6_t[int]
# For arguments which represent optional size parameters (eg, adaptive pool parameters)
_size_any_opt_t = _scalar_or_tuple_any_t[Optional[int]]
_size_2_opt_t = _scalar_or_tuple_2_t[Optional[int]]
_size_3_opt_t = _scalar_or_tuple_3_t[Optional[int]]
# For arguments that represent a ratio to adjust each dimension of an input with (eg, upsampling parameters)
_ratio_2_t = _scalar_or_tuple_2_t[float]
_ratio_3_t = _scalar_or_tuple_3_t[float]
_ratio_any_t = _scalar_or_tuple_any_t[float]
_tensor_list_t = _scalar_or_tuple_any_t[Tensor]
# For the return value of max pooling operations that may or may not return indices.
# With the proposed 'Literal' feature to Python typing, it might be possible to
# eventually eliminate this.
_maybe_indices_t = _scalar_or_tuple_2_t[Tensor]
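# A minimal illustrative sketch of how these aliases are consumed downstream: a parameter
# annotated with _size_2_t accepts either an int or a 2-tuple, and is typically normalized
# with torch.nn.modules.utils._pair. The helper name `_demo_size_2_t` is hypothetical.
def _demo_size_2_t(kernel_size: _size_2_t) -> Tuple[int, int]:
    # Deferred import so this sketch does not pull in torch.nn.modules at module load time.
    from torch.nn.modules.utils import _pair
    kh, kw = _pair(kernel_size)  # 3 -> (3, 3); (3, 5) -> (3, 5)
    return kh, kw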
|
pytorch-master
|
torch/nn/common_types.py
|
from .modules import * # noqa: F403
from .parameter import (
Parameter as Parameter,
UninitializedParameter as UninitializedParameter,
UninitializedBuffer as UninitializedBuffer,
)
from .parallel import DataParallel as DataParallel
from . import init
from . import functional
from . import utils
def factory_kwargs(kwargs):
r"""
Given kwargs, returns a canonicalized dict of factory kwargs that can be directly passed
to factory functions like torch.empty, or errors if unrecognized kwargs are present.
This function makes it simple to write code like this::
class MyModule(nn.Module):
def __init__(self, **kwargs):
factory_kwargs = torch.nn.factory_kwargs(kwargs)
self.weight = Parameter(torch.empty(10, **factory_kwargs))
Why should you use this function instead of just passing `kwargs` along directly?
1. This function does error validation, so if there are unexpected kwargs we will
immediately report an error, instead of deferring it to the factory call
2. This function supports a special `factory_kwargs` argument, which can be used to
explicitly specify a kwarg to be used for factory functions, in the event one of the
factory kwargs conflicts with an already existing argument in the signature (e.g.
in the signature ``def f(dtype, **kwargs)``, you can specify ``dtype`` for factory
functions, as distinct from the dtype argument, by saying
``f(dtype1, factory_kwargs={"dtype": dtype2})``)
"""
if kwargs is None:
return {}
simple_keys = {"device", "dtype", "memory_format"}
expected_keys = simple_keys | {"factory_kwargs"}
if not kwargs.keys() <= expected_keys:
raise TypeError(f"unexpected kwargs {kwargs.keys() - expected_keys}")
# guarantee no input kwargs is untouched
r = dict(kwargs.get("factory_kwargs", {}))
for k in simple_keys:
if k in kwargs:
if k in r:
raise TypeError(f"{k} specified twice, in **kwargs and in factory_kwargs")
r[k] = kwargs[k]
return r
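# A minimal illustrative sketch of the behavior documented above. The helper name
# `_demo_factory_kwargs` is hypothetical.
def _demo_factory_kwargs():
    import torch
    # Simple keys are passed through unchanged.
    assert factory_kwargs({"device": "cpu", "dtype": torch.float32}) == {
        "device": "cpu", "dtype": torch.float32}
    # An explicit factory_kwargs dict supplies keys that would otherwise clash
    # with the caller's own signature.
    assert factory_kwargs({"factory_kwargs": {"dtype": torch.float64}}) == {
        "dtype": torch.float64}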
|
pytorch-master
|
torch/nn/__init__.py
|
from typing import Optional
import warnings
# NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h
def get_enum(reduction: str) -> int:
if reduction == 'none':
ret = 0
elif reduction == 'mean':
ret = 1
elif reduction == 'elementwise_mean':
warnings.warn("reduction='elementwise_mean' is deprecated, please use reduction='mean' instead.")
ret = 1
elif reduction == 'sum':
ret = 2
else:
ret = -1 # TODO: remove once JIT exceptions support control flow
raise ValueError("{} is not a valid value for reduction".format(reduction))
return ret
# In order to support previous versions, accept boolean size_average and reduce
# and convert them into the new constants for now
# We use these functions in torch/legacy as well, in which case we'll silence the warning
def legacy_get_string(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> str:
warning = "size_average and reduce args will be deprecated, please use reduction='{}' instead."
if size_average is None:
size_average = True
if reduce is None:
reduce = True
if size_average and reduce:
ret = 'mean'
elif reduce:
ret = 'sum'
else:
ret = 'none'
if emit_warning:
warnings.warn(warning.format(ret))
return ret
def legacy_get_enum(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> int:
return get_enum(legacy_get_string(size_average, reduce, emit_warning))
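# A minimal illustrative sketch of the legacy-to-string mapping implemented above. The
# helper name `_demo_legacy_reduction` is hypothetical.
def _demo_legacy_reduction():
    assert legacy_get_string(True, True, emit_warning=False) == 'mean'
    assert legacy_get_string(False, True, emit_warning=False) == 'sum'
    assert legacy_get_string(True, False, emit_warning=False) == 'none'
    assert get_enum('sum') == 2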
|
pytorch-master
|
torch/nn/_reduction.py
|
"""Functionality for Python <-> C++ frontend inter-op."""
from torch import nn
class OrderedDictWrapper(object):
"""
A wrapper around a C++ OrderedDict that dynamically evaluates the
OrderedDict getter on a bound C++ module, such that new changes on the C++
side are picked up. Otherwise accessing e.g. ``cpp_module._parameters`` just
once would get a frozen copy of the parameters at the time of access.
``torch.nn.Module`` accesses ``_parameters`` et al. via ``self.__dict__`` so
using properties does not work.
"""
def __init__(self, cpp_module, attr):
self.cpp_module = cpp_module
self.attr = attr
@property
def cpp_dict(self):
return getattr(self.cpp_module, self.attr)
# Magic methods cannot be assigned dynamically and bypass ``getattr``, so we
# must manually override them.
def items(self):
return self.cpp_dict.items()
def keys(self):
return self.cpp_dict.keys()
def values(self):
return self.cpp_dict.values()
def __iter__(self):
return self.cpp_dict.__iter__()
def __len__(self):
return self.cpp_dict.__len__()
def __contains__(self, key):
return self.cpp_dict.__contains__(key)
def __getitem__(self, key):
return self.cpp_dict.__getitem__(key)
class ModuleWrapper(nn.Module):
"""
A subclass of ``torch.nn.Module`` that wraps a C++ frontend module and
delegates all access.
"""
def __init__(self, cpp_module):
# Assign before the super class constructor so ``self.training`` can be
# assigned to in the super class constructor.
self.cpp_module = cpp_module
super(ModuleWrapper, self).__init__()
self._parameters = OrderedDictWrapper(cpp_module, "_parameters") # type: ignore[assignment]
self._buffers: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_buffers") # type: ignore[assignment]
self._modules: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_modules") # type: ignore[assignment]
for attr in dir(cpp_module):
# Skip magic methods and the three attributes above.
if not attr.startswith("_"):
setattr(self, attr, getattr(self.cpp_module, attr))
def _apply(self, fn):
for param in self.parameters():
# Tensors stored in modules are graph leaves, and we don't
# want to create copy nodes, so we have to unpack the data.
param.data = fn(param.data)
if param._grad is not None:
param._grad.data = fn(param._grad.data)
for buf in self.buffers():
buf.data = fn(buf.data)
return self
# nn.Module defines training as a boolean
@property # type: ignore[override]
def training(self):
return self.cpp_module.training
@training.setter
def training(self, mode):
self.cpp_module.train(mode)
def __repr__(self):
return self.cpp_module.__repr__()
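# A minimal illustrative sketch of the "dynamic evaluation" property described above, using
# a plain Python object as a stand-in for a bound C++ module. The names `_FakeCppModule`
# and `_demo_ordered_dict_wrapper` are hypothetical.
def _demo_ordered_dict_wrapper():
    class _FakeCppModule:
        def __init__(self):
            self._parameters = {}

    m = _FakeCppModule()
    wrapper = OrderedDictWrapper(m, "_parameters")
    assert len(wrapper) == 0
    m._parameters["weight"] = object()  # mutate on the wrapped-module side
    assert "weight" in wrapper          # the wrapper re-reads the dict on every access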
|
pytorch-master
|
torch/nn/cpp.py
|
r"""Functional interface"""
from typing import Callable, List, Optional, Tuple, Union
import math
import warnings
import torch
from torch import _VF
from torch._C import _infer_size, _add_docstr
from torch._torch_docs import reproducibility_notes, tf32_notes
# A workaround to support both TorchScript and MyPy:
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from torch.types import _dtype as DType
else:
# The JIT doesn't understand Union, nor torch.dtype here
DType = int
from .._jit_internal import boolean_dispatch, _overload, BroadcastingList1, BroadcastingList2, BroadcastingList3
from ..overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
from . import _reduction as _Reduction
from . import grad # noqa: F401
from .modules import utils
from .modules.utils import _single, _pair, _triple, _list_with_default
Tensor = torch.Tensor
conv1d = _add_docstr(
torch.conv1d,
r"""
conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 1D convolution over an input signal composed of several input
planes.
{tf32_note}
See :class:`~torch.nn.Conv1d` for details and output shape.
Note:
{cudnn_reproducibility_note}
Note:
This operator supports complex data types i.e. ``complex32, complex64, complex128``.
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kW)`
bias: optional bias of shape :math:`(\text{out\_channels})`. Default: ``None``
stride: the stride of the convolving kernel. Can be a single number or
a one-element tuple `(sW,)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},
single number or a one-element tuple `(padW,)`. Default: 0
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
.. warning::
For ``padding='same'``, if the ``weight`` is even-length and
``dilation`` is odd in any dimension, a full :func:`pad` operation
may be needed internally, lowering performance.
dilation: the spacing between kernel elements. Can be a single number or
a one-element tuple `(dW,)`. Default: 1
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by
the number of groups. Default: 1
Examples::
>>> inputs = torch.randn(33, 16, 30)
>>> filters = torch.randn(20, 16, 5)
>>> F.conv1d(inputs, filters)
""",
)
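# A minimal illustrative sketch of the padding='same' behavior documented above: with
# stride 1 the output length matches the input length. The helper name
# `_demo_conv1d_same_padding` is hypothetical.
def _demo_conv1d_same_padding():
    inputs = torch.randn(1, 16, 30)
    filters = torch.randn(8, 16, 5)
    out = conv1d(inputs, filters, padding="same")
    assert out.shape == (1, 8, 30)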
conv2d = _add_docstr(
torch.conv2d,
r"""
conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 2D convolution over an input image composed of several input
planes.
{tf32_note}
See :class:`~torch.nn.Conv2d` for details and output shape.
Note:
{cudnn_reproducibility_note}
Note:
This operator supports complex data types i.e. ``complex32, complex64, complex128``.
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: ``None``
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sH, sW)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},
single number or a tuple `(padH, padW)`. Default: 0
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
.. warning::
For ``padding='same'``, if the ``weight`` is even-length and
``dilation`` is odd in any dimension, a full :func:`pad` operation
may be needed internally, lowering performance.
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dH, dW)`. Default: 1
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
number of groups. Default: 1
Examples::
>>> # With square kernels and equal stride
>>> filters = torch.randn(8, 4, 3, 3)
>>> inputs = torch.randn(1, 4, 5, 5)
>>> F.conv2d(inputs, filters, padding=1)
""",
) # noqa: E501
conv3d = _add_docstr(
torch.conv3d,
r"""
conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
Applies a 3D convolution over an input image composed of several input
planes.
{tf32_note}
See :class:`~torch.nn.Conv3d` for details and output shape.
Note:
{cudnn_reproducibility_note}
Note:
This operator supports complex data types i.e. ``complex32, complex64, complex128``.
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kT , kH , kW)`
bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sT, sH, sW)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'},
single number or a tuple `(padT, padH, padW)`. Default: 0
``padding='valid'`` is the same as no padding. ``padding='same'`` pads
the input so the output has the same shape as the input. However, this mode
doesn't support any stride values other than 1.
.. warning::
For ``padding='same'``, if the ``weight`` is even-length and
``dilation`` is odd in any dimension, a full :func:`pad` operation
may be needed internally, lowering performance.
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dT, dH, dW)`. Default: 1
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by
the number of groups. Default: 1
Examples::
>>> filters = torch.randn(33, 16, 3, 3, 3)
>>> inputs = torch.randn(20, 16, 50, 10, 20)
>>> F.conv3d(inputs, filters)
""",
) # noqa: E501
conv_transpose1d = _add_docstr(
torch.conv_transpose1d,
r"""
conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
Applies a 1D transposed convolution operator over an input signal
composed of several input planes, sometimes also called "deconvolution".
{tf32_note}
See :class:`~torch.nn.ConvTranspose1d` for details and output shape.
Note:
{cudnn_reproducibility_note}
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kW)`
bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple ``(sW,)``. Default: 1
padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
sides of each dimension in the input. Can be a single number or a tuple
``(padW,)``. Default: 0
output_padding: additional size added to one side of each dimension in the
output shape. Can be a single number or a tuple ``(out_padW)``. Default: 0
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
a tuple ``(dW,)``. Default: 1
Examples::
>>> inputs = torch.randn(20, 16, 50)
>>> weights = torch.randn(16, 33, 5)
>>> F.conv_transpose1d(inputs, weights)
""",
)
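# A minimal illustrative sketch of how stride, padding and output_padding interact for the
# transposed convolution above: the output length is
# (iW - 1) * stride - 2 * padding + dilation * (kW - 1) + output_padding + 1.
# The helper name `_demo_conv_transpose1d_shape` is hypothetical.
def _demo_conv_transpose1d_shape():
    inputs = torch.randn(1, 16, 50)
    weights = torch.randn(16, 33, 5)
    out = conv_transpose1d(inputs, weights, stride=2, padding=1, output_padding=1)
    # (50 - 1) * 2 - 2 * 1 + 1 * (5 - 1) + 1 + 1 = 102
    assert out.shape == (1, 33, 102)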
conv_transpose2d = _add_docstr(
torch.conv_transpose2d,
r"""
conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
Applies a 2D transposed convolution operator over an input image
composed of several input planes, sometimes also called "deconvolution".
{tf32_note}
See :class:`~torch.nn.ConvTranspose2d` for details and output shape.
Note:
{cudnn_reproducibility_note}
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kH , kW)`
bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple ``(sH, sW)``. Default: 1
padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
sides of each dimension in the input. Can be a single number or a tuple
``(padH, padW)``. Default: 0
output_padding: additional size added to one side of each dimension in the
output shape. Can be a single number or a tuple ``(out_padH, out_padW)``.
Default: 0
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
a tuple ``(dH, dW)``. Default: 1
Examples::
>>> # With square kernels and equal stride
>>> inputs = torch.randn(1, 4, 5, 5)
>>> weights = torch.randn(4, 8, 3, 3)
>>> F.conv_transpose2d(inputs, weights, padding=1)
""",
) # noqa: E501
conv_transpose3d = _add_docstr(
torch.conv_transpose3d,
r"""
conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
Applies a 3D transposed convolution operator over an input image
composed of several input planes, sometimes also called "deconvolution".
{tf32_note}
See :class:`~torch.nn.ConvTranspose3d` for details and output shape.
Note:
{cudnn_reproducibility_note}
""".format(
**reproducibility_notes, **tf32_notes
)
+ r"""
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kT , kH , kW)`
bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None
stride: the stride of the convolving kernel. Can be a single number or a
tuple ``(sT, sH, sW)``. Default: 1
padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both
sides of each dimension in the input. Can be a single number or a tuple
``(padT, padH, padW)``. Default: 0
output_padding: additional size added to one side of each dimension in the
output shape. Can be a single number or a tuple
``(out_padT, out_padH, out_padW)``. Default: 0
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dT, dH, dW)`. Default: 1
Examples::
>>> inputs = torch.randn(20, 16, 50, 10, 20)
>>> weights = torch.randn(16, 33, 3, 3, 3)
>>> F.conv_transpose3d(inputs, weights)
""",
) # noqa: E501
conv_tbc = _add_docstr(
torch.conv_tbc,
r"""
Applies a 1-dimensional sequence convolution over an input sequence.
Input and output dimensions are (Time, Batch, Channels) - hence TBC.
Args:
input: input tensor of shape :math:`(\text{sequence length} \times batch \times \text{in\_channels})`
weight: filter of shape (:math:`\text{kernel width} \times \text{in\_channels} \times \text{out\_channels}`)
bias: bias of shape (:math:`\text{out\_channels}`)
pad: number of timesteps to pad. Default: 0
""",
)
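# A minimal illustrative sketch of the (Time, Batch, Channels) layout described above,
# with the default pad of 0. The helper name `_demo_conv_tbc` is hypothetical.
def _demo_conv_tbc():
    x = torch.randn(7, 2, 4)  # (time, batch, in_channels)
    w = torch.randn(3, 4, 5)  # (kernel_width, in_channels, out_channels)
    b = torch.zeros(5)
    out = conv_tbc(x, w, b)
    assert out.shape == (5, 2, 5)  # time shrinks to 7 - 3 + 1 without padding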
# Pooling
avg_pool1d = _add_docstr(
torch.avg_pool1d,
r"""
avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor
Applies a 1D average pooling over an input signal composed of several
input planes.
See :class:`~torch.nn.AvgPool1d` for details and output shape.
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
kernel_size: the size of the window. Can be a single number or a
tuple `(kW,)`
stride: the stride of the window. Can be a single number or a tuple
`(sW,)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padW,)`. Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` to compute the
output shape. Default: ``False``
count_include_pad: when True, will include the zero-padding in the
averaging calculation. Default: ``True``
Examples::
>>> # pool of square window of size=3, stride=2
>>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32)
>>> F.avg_pool1d(input, kernel_size=3, stride=2)
tensor([[[ 2., 4., 6.]]])
""",
)
avg_pool2d = _add_docstr(
torch._C._nn.avg_pool2d,
r"""
avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor
Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
:math:`sH \times sW` steps. The number of output features is equal to the number of
input planes.
See :class:`~torch.nn.AvgPool2d` for details and output shape.
Args:
input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sH, sW)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padH, padW)`. Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` in the formula
to compute the output shape. Default: ``False``
count_include_pad: when True, will include the zero-padding in the
averaging calculation. Default: ``True``
divisor_override: if specified, it will be used as divisor, otherwise
size of the pooling region will be used. Default: None
""",
)
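# A minimal illustrative sketch of divisor_override as documented above: with a 2x2 window
# of ones the default divisor is 4, and overriding it with 2 doubles every output value.
# The helper name `_demo_avg_pool2d_divisor_override` is hypothetical.
def _demo_avg_pool2d_divisor_override():
    x = torch.ones(1, 1, 4, 4)
    assert avg_pool2d(x, 2).eq(1.0).all()
    assert avg_pool2d(x, 2, divisor_override=2).eq(2.0).all()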
avg_pool3d = _add_docstr(
torch._C._nn.avg_pool3d,
r"""
avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor
Applies 3D average-pooling operation in :math:`kT \times kH \times kW` regions by step
size :math:`sT \times sH \times sW` steps. The number of output features is equal to
:math:`\lfloor\frac{\text{input planes}}{sT}\rfloor`.
See :class:`~torch.nn.AvgPool3d` for details and output shape.
Args:
input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)`
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kT, kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sT, sH, sW)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padT, padH, padW)`. Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` in the formula
to compute the output shape. Default: ``False``
count_include_pad: when True, will include the zero-padding in the
averaging calculation. Default: ``True``
divisor_override: if specified, it will be used as divisor, otherwise
size of the pooling region will be used. Default: None
""",
)
def fractional_max_pool2d_with_indices(
input: Tensor, kernel_size: BroadcastingList2[int],
output_size: Optional[BroadcastingList2[int]] = None,
output_ratio: Optional[BroadcastingList2[float]] = None,
return_indices: bool = False,
_random_samples: Optional[Tensor] = None
) -> Tuple[Tensor, Tensor]:
r"""Applies 2D fractional max pooling over an input signal composed of several input planes.
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
step size determined by the target output size.
The number of output features is equal to the number of input planes.
Args:
kernel_size: the size of the window to take a max over.
Can be a single number :math:`k` (for a square kernel of :math:`k \times k`)
or a tuple `(kH, kW)`
output_size: the target output size of the image of the form :math:`oH \times oW`.
Can be a tuple `(oH, oW)` or a single number :math:`oH` for a square image :math:`oH \times oH`
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
This has to be a number or tuple in the range (0, 1)
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to :func:`~torch.nn.functional.max_unpool2d`.
Examples::
>>> input = torch.randn(20, 16, 50, 32)
>>> # pool of square window of size=3, and target output size 13x12
>>> F.fractional_max_pool2d(input, 3, output_size=(13, 12))
>>> # pool of square window and target output size being half of input image size
>>> F.fractional_max_pool2d(input, 3, output_ratio=(0.5, 0.5))
.. _Fractional MaxPooling:
http://arxiv.org/abs/1412.6071
"""
if has_torch_function_variadic(input, _random_samples):
return handle_torch_function(
fractional_max_pool2d_with_indices,
(input, _random_samples),
input,
kernel_size,
output_size=output_size,
output_ratio=output_ratio,
return_indices=return_indices,
_random_samples=_random_samples,
)
if output_size is None and output_ratio is None:
raise ValueError("fractional_max_pool2d requires specifying either " "an output_size or an output_ratio")
if output_size is None:
assert output_ratio is not None
_output_ratio = _pair(output_ratio)
output_size = [int(input.size(-2) * _output_ratio[0]), int(input.size(-1) * _output_ratio[1])]
if _random_samples is None:
n_batch = 1 if input.dim() == 3 else input.size(0)
_random_samples = torch.rand(n_batch, input.size(-3), 2, dtype=input.dtype, device=input.device)
return torch._C._nn.fractional_max_pool2d(input, kernel_size, output_size, _random_samples)
def _fractional_max_pool2d(
input: Tensor, kernel_size: BroadcastingList2[int],
output_size: Optional[BroadcastingList2[int]] = None,
output_ratio: Optional[BroadcastingList2[float]] = None,
return_indices: bool = False,
_random_samples: Optional[Tensor] = None
) -> Tensor:
if has_torch_function_variadic(input, _random_samples):
return handle_torch_function(
fractional_max_pool2d,
(input, _random_samples),
input,
kernel_size,
output_size=output_size,
output_ratio=output_ratio,
return_indices=return_indices,
_random_samples=_random_samples,
)
return fractional_max_pool2d_with_indices(
input, kernel_size, output_size, output_ratio, return_indices, _random_samples
)[0]
fractional_max_pool2d = boolean_dispatch(
arg_name="return_indices",
arg_index=4,
default=False,
if_true=fractional_max_pool2d_with_indices,
if_false=_fractional_max_pool2d,
module_name=__name__,
func_name="fractional_max_pool2d",
)
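# A minimal illustrative sketch of the boolean_dispatch above: return_indices=True routes
# the call to the *_with_indices variant, so both call patterns below are valid. The helper
# name `_demo_fractional_max_pool2d_dispatch` is hypothetical.
def _demo_fractional_max_pool2d_dispatch():
    x = torch.randn(1, 4, 16, 16)
    out = fractional_max_pool2d(x, 3, output_size=(8, 8))
    out_i, idx = fractional_max_pool2d(x, 3, output_size=(8, 8), return_indices=True)
    assert out.shape == (1, 4, 8, 8) and out_i.shape == idx.shape == (1, 4, 8, 8)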
def fractional_max_pool3d_with_indices(
input: Tensor, kernel_size: BroadcastingList3[int],
output_size: Optional[BroadcastingList3[int]] = None,
output_ratio: Optional[BroadcastingList3[float]] = None,
return_indices: bool = False,
_random_samples: Optional[Tensor] = None
) -> Tuple[Tensor, Tensor]:
r"""Applies 3D fractional max pooling over an input signal composed of several input planes.
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
step size determined by the target output size.
The number of output features is equal to the number of input planes.
Args:
kernel_size: the size of the window to take a max over.
Can be a single number :math:`k` (for a square kernel of :math:`k \times k \times k`)
or a tuple `(kT, kH, kW)`
output_size: the target output size of the form :math:`oT \times oH \times oW`.
Can be a tuple `(oT, oH, oW)` or a single number :math:`oH` for a cubic output
:math:`oH \times oH \times oH`
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
This has to be a number or tuple in the range (0, 1)
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to :func:`~torch.nn.functional.max_unpool3d`.
Shape:
- Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`.
- Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where
:math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or
:math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})`
Examples::
>>> input = torch.randn(20, 16, 50, 32, 16)
>>> # pool of cubic window of size=3, and target output size 13x12x11
>>> F.fractional_max_pool3d(input, 3, output_size=(13, 12, 11))
>>> # pool of cubic window and target output size being half of input size
>>> F.fractional_max_pool3d(input, 3, output_ratio=(0.5, 0.5, 0.5))
.. _Fractional MaxPooling:
http://arxiv.org/abs/1412.6071
"""
if has_torch_function_variadic(input, _random_samples):
return handle_torch_function(
fractional_max_pool3d_with_indices,
(input, _random_samples),
input,
kernel_size,
output_size=output_size,
output_ratio=output_ratio,
return_indices=return_indices,
_random_samples=_random_samples,
)
if output_size is None and output_ratio is None:
raise ValueError("fractional_max_pool3d requires specifying either " "an output_size or an output_ratio")
if output_size is None:
assert output_ratio is not None
_output_ratio = _triple(output_ratio)
output_size = [
int(input.size(-3) * _output_ratio[0]),
int(input.size(-2) * _output_ratio[1]),
int(input.size(-1) * _output_ratio[2]),
]
if _random_samples is None:
n_batch = 1 if input.dim() == 4 else input.size(0)
_random_samples = torch.rand(n_batch, input.size(-4), 3, dtype=input.dtype, device=input.device)
return torch._C._nn.fractional_max_pool3d(input, kernel_size, output_size, _random_samples)
def _fractional_max_pool3d(
input: Tensor, kernel_size: BroadcastingList3[int],
output_size: Optional[BroadcastingList3[int]] = None,
output_ratio: Optional[BroadcastingList3[float]] = None,
return_indices: bool = False,
_random_samples: Optional[Tensor] = None
) -> Tensor:
if has_torch_function_variadic(input, _random_samples):
return handle_torch_function(
fractional_max_pool3d,
(input, _random_samples),
input,
kernel_size,
output_size=output_size,
output_ratio=output_ratio,
return_indices=return_indices,
_random_samples=_random_samples,
)
return fractional_max_pool3d_with_indices(
input, kernel_size, output_size, output_ratio, return_indices, _random_samples
)[0]
fractional_max_pool3d = boolean_dispatch(
arg_name="return_indices",
arg_index=4,
default=False,
if_true=fractional_max_pool3d_with_indices,
if_false=_fractional_max_pool3d,
module_name=__name__,
func_name="fractional_max_pool3d",
)
def max_pool1d_with_indices(
input: Tensor, kernel_size: BroadcastingList1[int],
stride: Optional[BroadcastingList1[int]] = None,
padding: BroadcastingList1[int] = 0,
dilation: BroadcastingList1[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False
) -> Tuple[Tensor, Tensor]:
r"""
max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)
Applies a 1D max pooling over an input signal composed of several input
planes.
.. note::
The order of :attr:`ceil_mode` and :attr:`return_indices` is different from
what is seen in :class:`~torch.nn.MaxPool1d`, and will change in a future release.
See :class:`~torch.nn.MaxPool1d` for details.
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`, minibatch dim optional.
kernel_size: the size of the window. Can be a single number or a
tuple `(kW,)`
stride: the stride of the window. Can be a single number or a tuple
`(sW,)`. Default: :attr:`kernel_size`
padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
dilation: The stride between elements within a sliding window, must be > 0.
ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
ensures that every element in the input tensor is covered by a sliding window.
return_indices: If ``True``, will return the argmax along with the max values.
Useful for :class:`torch.nn.functional.max_unpool1d` later
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_pool1d_with_indices,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(List[int], [])
return torch.max_pool1d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)
def _max_pool1d(
input: Tensor, kernel_size: BroadcastingList1[int],
stride: Optional[BroadcastingList1[int]] = None,
padding: BroadcastingList1[int] = 0,
dilation: BroadcastingList1[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False
) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
max_pool1d,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(List[int], [])
return torch.max_pool1d(input, kernel_size, stride, padding, dilation, ceil_mode)
max_pool1d = boolean_dispatch(
arg_name="return_indices",
arg_index=6,
default=False,
if_true=max_pool1d_with_indices,
if_false=_max_pool1d,
module_name=__name__,
func_name="max_pool1d",
)
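# A minimal illustrative sketch of the dispatched max_pool1d above, with and without the
# returned indices. The helper name `_demo_max_pool1d` is hypothetical.
def _demo_max_pool1d():
    x = torch.tensor([[[1., 3., 2., 5., 4., 6.]]])
    out = max_pool1d(x, kernel_size=2, stride=2)
    out_i, idx = max_pool1d(x, kernel_size=2, stride=2, return_indices=True)
    assert torch.equal(out, torch.tensor([[[3., 5., 6.]]]))
    assert torch.equal(out_i, out) and torch.equal(idx, torch.tensor([[[1, 3, 5]]]))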
def max_pool2d_with_indices(
input: Tensor, kernel_size: BroadcastingList2[int],
stride: Optional[BroadcastingList2[int]] = None,
padding: BroadcastingList2[int] = 0,
dilation: BroadcastingList2[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False
) -> Tuple[Tensor, Tensor]:
r"""
max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)
Applies a 2D max pooling over an input signal composed of several input
planes.
.. note::
The order of :attr:`ceil_mode` and :attr:`return_indices` is different from
what is seen in :class:`~torch.nn.MaxPool2d`, and will change in a future release.
See :class:`~torch.nn.MaxPool2d` for details.
Args:
input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`, minibatch dim optional.
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sH, sW)`. Default: :attr:`kernel_size`
padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
dilation: The stride between elements within a sliding window, must be > 0.
ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
ensures that every element in the input tensor is covered by a sliding window.
return_indices: If ``True``, will return the argmax along with the max values.
Useful for :class:`torch.nn.functional.max_unpool2d` later
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_pool2d_with_indices,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(List[int], [])
return torch._C._nn.max_pool2d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)
def _max_pool2d(
input: Tensor, kernel_size: BroadcastingList2[int],
stride: Optional[BroadcastingList2[int]] = None,
padding: BroadcastingList2[int] = 0,
dilation: BroadcastingList2[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False
) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
max_pool2d,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(List[int], [])
return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
max_pool2d = boolean_dispatch(
arg_name="return_indices",
arg_index=6,
default=False,
if_true=max_pool2d_with_indices,
if_false=_max_pool2d,
module_name=__name__,
func_name="max_pool2d",
)
def max_pool3d_with_indices(
input: Tensor, kernel_size: BroadcastingList3[int],
stride: Optional[BroadcastingList3[int]] = None,
padding: BroadcastingList3[int] = 0,
dilation: BroadcastingList3[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False
) -> Tuple[Tensor, Tensor]:
r"""
max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False)
Applies a 3D max pooling over an input signal composed of several input
planes.
.. note::
The order of :attr:`ceil_mode` and :attr:`return_indices` is different from
what is seen in :class:`~torch.nn.MaxPool3d`, and will change in a future release.
See :class:`~torch.nn.MaxPool3d` for details.
Args:
input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iD, iH , iW)`, minibatch dim optional.
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kT, kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sT, sH, sW)`. Default: :attr:`kernel_size`
padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
dilation: The stride between elements within a sliding window, must be > 0.
ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
ensures that every element in the input tensor is covered by a sliding window.
return_indices: If ``True``, will return the argmax along with the max values.
Useful for :class:`torch.nn.functional.max_unpool3d` later
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_pool3d_with_indices,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(List[int], [])
return torch._C._nn.max_pool3d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode)
def _max_pool3d(
input: Tensor, kernel_size: BroadcastingList3[int],
stride: Optional[BroadcastingList3[int]] = None,
padding: BroadcastingList3[int] = 0,
dilation: BroadcastingList3[int] = 1,
ceil_mode: bool = False,
return_indices: bool = False
) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
max_pool3d,
(input,),
input,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=return_indices,
)
if stride is None:
stride = torch.jit.annotate(List[int], [])
return torch.max_pool3d(input, kernel_size, stride, padding, dilation, ceil_mode)
max_pool3d = boolean_dispatch(
arg_name="return_indices",
arg_index=6,
default=False,
if_true=max_pool3d_with_indices,
if_false=_max_pool3d,
module_name=__name__,
func_name="max_pool3d",
)
def _unpool_output_size(
input: Tensor, kernel_size: List[int], stride: List[int], padding: List[int], output_size: Optional[List[int]]
) -> List[int]:
input_size = input.size()
default_size = torch.jit.annotate(List[int], [])
for d in range(len(kernel_size)):
default_size.append((input_size[-len(kernel_size) + d] - 1) * stride[d] + kernel_size[d] - 2 * padding[d])
if output_size is None:
ret = default_size
else:
if len(output_size) == len(kernel_size) + 2:
output_size = output_size[2:]
if len(output_size) != len(kernel_size):
raise ValueError(
"output_size should be a sequence containing "
"{} or {} elements, but it has a length of '{}'".format(
len(kernel_size), len(kernel_size) + 2, len(output_size)
)
)
for d in range(len(kernel_size)):
min_size = default_size[d] - stride[d]
max_size = default_size[d] + stride[d]
if not (min_size < output_size[d] < max_size):
raise ValueError(
'invalid output_size "{}" (dim {} must be between {} and {})'.format(
output_size, d, min_size, max_size
)
)
ret = output_size
return ret
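# A minimal illustrative sketch of the default size computed above: unpooling a length-2
# tensor that came from kernel_size=2, stride=2 pooling yields (2 - 1) * 2 + 2 - 2 * 0 = 4.
# The helper name `_demo_unpool_output_size` is hypothetical.
def _demo_unpool_output_size():
    pooled = torch.randn(1, 1, 2)
    assert _unpool_output_size(pooled, [2], [2], [0], None) == [4]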
def max_unpool1d(
input: Tensor, indices: Tensor,
kernel_size: BroadcastingList1[int],
stride: Optional[BroadcastingList1[int]] = None,
padding: BroadcastingList1[int] = 0,
output_size: Optional[BroadcastingList1[int]] = None
) -> Tensor:
r"""Computes a partial inverse of :class:`MaxPool1d`.
See :class:`~torch.nn.MaxUnpool1d` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_unpool1d,
(input,),
input,
indices,
kernel_size,
stride=stride,
padding=padding,
output_size=output_size,
)
kernel_size = _single(kernel_size)
if stride is not None:
_stride = _single(stride)
else:
_stride = kernel_size
padding = _single(padding)
output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
if isinstance(output_size, list):
output_size = output_size + [1]
else:
output_size = output_size + (1,)
return torch._C._nn.max_unpool2d(input.unsqueeze(-1), indices.unsqueeze(-1), output_size).squeeze(-1)
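# Usage sketch (values are assumptions, F = torch.nn.functional): each pooled
# value is written back to the position recorded in `indices`, and every other
# position is zero-filled.
# >>> x = torch.tensor([[[1., 2., 3., 4.]]])
# >>> pooled, indices = F.max_pool1d(x, kernel_size=2, return_indices=True)
# >>> F.max_unpool1d(pooled, indices, kernel_size=2)
# tensor([[[0., 2., 0., 4.]]])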
def max_unpool2d(
input: Tensor, indices: Tensor,
kernel_size: BroadcastingList2[int],
stride: Optional[BroadcastingList2[int]] = None,
padding: BroadcastingList2[int] = 0,
output_size: Optional[BroadcastingList2[int]] = None
) -> Tensor:
r"""Computes a partial inverse of :class:`MaxPool2d`.
See :class:`~torch.nn.MaxUnpool2d` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_unpool2d,
(input,),
input,
indices,
kernel_size,
stride=stride,
padding=padding,
output_size=output_size,
)
kernel_size = _pair(kernel_size)
if stride is not None:
_stride = _pair(stride)
else:
_stride = kernel_size
padding = _pair(padding)
output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
return torch._C._nn.max_unpool2d(input, indices, output_size)
def max_unpool3d(
input: Tensor, indices: Tensor,
kernel_size: BroadcastingList3[int],
stride: Optional[BroadcastingList3[int]] = None,
padding: BroadcastingList3[int] = 0,
output_size: Optional[BroadcastingList3[int]] = None
) -> Tensor:
r"""Computes a partial inverse of :class:`MaxPool3d`.
See :class:`~torch.nn.MaxUnpool3d` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
max_unpool3d,
(input,),
input,
indices,
kernel_size,
stride=stride,
padding=padding,
output_size=output_size,
)
kernel_size = _triple(kernel_size)
if stride is not None:
_stride = _triple(stride)
else:
_stride = kernel_size
padding = _triple(padding)
output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size)
return torch._C._nn.max_unpool3d(input, indices, output_size, _stride, padding)
def lp_pool2d(
input: Tensor, norm_type: Union[int, float],
kernel_size: BroadcastingList2[int],
stride: Optional[BroadcastingList2[int]] = None,
ceil_mode: bool = False
) -> Tensor:
r"""Applies a 2D power-average pooling over an input signal composed of
several input planes. If the sum of all inputs to the power of `p` is
zero, the gradient is set to zero as well.
See :class:`~torch.nn.LPPool2d` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
lp_pool2d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode
)
kw, kh = utils._pair(kernel_size)
if stride is not None:
out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
else:
out = avg_pool2d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)
return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1.0 / norm_type)
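# Sketch of the formula implemented above (input is an assumption,
# F = torch.nn.functional): avg_pool2d(x ** p) * (kw * kh) recovers the
# windowed sum before the final 1 / p power, so norm_type=2 yields the root of
# the sum of squares over each window.
# >>> x = torch.ones(1, 1, 2, 2)
# >>> F.lp_pool2d(x, norm_type=2, kernel_size=2)
# tensor([[[[2.]]]])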
def lp_pool1d(
input: Tensor, norm_type: Union[int, float],
kernel_size: int,
stride: Optional[BroadcastingList1[int]] = None,
ceil_mode: bool = False
) -> Tensor:
r"""Applies a 1D power-average pooling over an input signal composed of
several input planes. If the sum of all inputs to the power of `p` is
zero, the gradient is set to zero as well.
See :class:`~torch.nn.LPPool1d` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
lp_pool1d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode
)
if stride is not None:
out = avg_pool1d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode)
else:
out = avg_pool1d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode)
return (torch.sign(out) * relu(torch.abs(out))).mul(kernel_size).pow(1.0 / norm_type)
def adaptive_max_pool1d_with_indices(
input: Tensor, output_size: BroadcastingList1[int], return_indices: bool = False
) -> Tuple[Tensor, Tensor]:
r"""Applies a 1D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape.
Args:
output_size: the target output size (single integer)
return_indices: whether to return pooling indices. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool1d_with_indices, (input,), input, output_size, return_indices=return_indices
)
return torch.adaptive_max_pool1d(input, output_size)
def _adaptive_max_pool1d(input: Tensor, output_size: BroadcastingList1[int], return_indices: bool = False) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool1d, (input,), input, output_size, return_indices=return_indices
)
return adaptive_max_pool1d_with_indices(input, output_size)[0]
adaptive_max_pool1d = boolean_dispatch(
arg_name="return_indices",
arg_index=2,
default=False,
if_true=adaptive_max_pool1d_with_indices,
if_false=_adaptive_max_pool1d,
module_name=__name__,
func_name="adaptive_max_pool1d",
)
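# Usage sketch (shapes are assumptions, F = torch.nn.functional): the pooling
# windows are chosen internally so that the output length equals `output_size`,
# whatever the input length.
# >>> x = torch.randn(1, 3, 32)
# >>> F.adaptive_max_pool1d(x, output_size=5).shape
# torch.Size([1, 3, 5])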
def adaptive_max_pool2d_with_indices(
input: Tensor, output_size: BroadcastingList2[int],
return_indices: bool = False
) -> Tuple[Tensor, Tensor]:
r"""Applies a 2D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or
double-integer tuple)
return_indices: whether to return pooling indices. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool2d_with_indices, (input,), input, output_size, return_indices=return_indices
)
output_size = _list_with_default(output_size, input.size())
return torch._C._nn.adaptive_max_pool2d(input, output_size)
def _adaptive_max_pool2d(input: Tensor, output_size: BroadcastingList2[int], return_indices: bool = False) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool2d, (input,), input, output_size, return_indices=return_indices
)
return adaptive_max_pool2d_with_indices(input, output_size)[0]
adaptive_max_pool2d = boolean_dispatch(
arg_name="return_indices",
arg_index=2,
default=False,
if_true=adaptive_max_pool2d_with_indices,
if_false=_adaptive_max_pool2d,
module_name=__name__,
func_name="adaptive_max_pool2d",
)
def adaptive_max_pool3d_with_indices(
input: Tensor, output_size: BroadcastingList3[int],
return_indices: bool = False
) -> Tuple[Tensor, Tensor]:
r"""Applies a 3D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool3d` for details and output shape.
Args:
output_size: the target output size (single integer or
triple-integer tuple)
return_indices: whether to return pooling indices. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool3d_with_indices, (input,), input, output_size, return_indices=return_indices
)
output_size = _list_with_default(output_size, input.size())
return torch._C._nn.adaptive_max_pool3d(input, output_size)
def _adaptive_max_pool3d(input: Tensor, output_size: BroadcastingList3[int], return_indices: bool = False) -> Tensor:
if has_torch_function_unary(input):
return handle_torch_function(
adaptive_max_pool3d, (input,), input, output_size, return_indices=return_indices
)
return adaptive_max_pool3d_with_indices(input, output_size)[0]
adaptive_max_pool3d = boolean_dispatch(
arg_name="return_indices",
arg_index=2,
default=False,
if_true=adaptive_max_pool3d_with_indices,
if_false=_adaptive_max_pool3d,
module_name=__name__,
func_name="adaptive_max_pool3d",
)
adaptive_avg_pool1d = _add_docstr(
torch.adaptive_avg_pool1d,
r"""
adaptive_avg_pool1d(input, output_size) -> Tensor
Applies a 1D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveAvgPool1d` for details and output shape.
Args:
output_size: the target output size (single integer)
""",
)
def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
r"""
Applies a 2D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or
double-integer tuple)
"""
if has_torch_function_unary(input):
return handle_torch_function(adaptive_avg_pool2d, (input,), input, output_size)
_output_size = _list_with_default(output_size, input.size())
return torch._C._nn.adaptive_avg_pool2d(input, _output_size)
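# Usage sketch (shapes are assumptions, F = torch.nn.functional): a target size
# of 1 is global average pooling, collapsing each channel to its mean.
# >>> x = torch.randn(2, 8, 7, 7)
# >>> F.adaptive_avg_pool2d(x, 1).shape
# torch.Size([2, 8, 1, 1])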
def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList3[int]) -> Tensor:
r"""
Applies a 3D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveAvgPool3d` for details and output shape.
Args:
output_size: the target output size (single integer or
triple-integer tuple)
"""
if has_torch_function_unary(input):
return handle_torch_function(adaptive_avg_pool3d, (input,), input, output_size)
_output_size = _list_with_default(output_size, input.size())
return torch._C._nn.adaptive_avg_pool3d(input, _output_size)
# Activation functions
def dropout(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:
r"""
During training, randomly zeroes some of the elements of the input
tensor with probability :attr:`p` using samples from a Bernoulli
distribution.
See :class:`~torch.nn.Dropout` for details.
Args:
p: probability of an element to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``True``
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(dropout, (input,), input, p=p, training=training, inplace=inplace)
if p < 0.0 or p > 1.0:
raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p))
return _VF.dropout_(input, p, training) if inplace else _VF.dropout(input, p, training)
def alpha_dropout(input: Tensor, p: float = 0.5, training: bool = False, inplace: bool = False) -> Tensor:
r"""Applies alpha dropout to the input.
See :class:`~torch.nn.AlphaDropout` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(alpha_dropout, (input,), input, p=p, training=training, inplace=inplace)
if p < 0.0 or p > 1.0:
raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p))
return _VF.alpha_dropout_(input, p, training) if inplace else _VF.alpha_dropout(input, p, training)
def dropout1d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:
r"""
Randomly zero out entire channels (a channel is a 1D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
    batched input is a 1D tensor :math:`\text{input}[i, j]`) of the input tensor.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
See :class:`~torch.nn.Dropout1d` for details.
Args:
p: probability of a channel to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``True``
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(dropout1d, (input,), input, p=p, training=training, inplace=inplace)
if p < 0.0 or p > 1.0:
raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p))
inp_dim = input.dim()
if inp_dim not in (2, 3):
raise RuntimeError(f"dropout1d: Expected 2D or 3D input, but received a {inp_dim}D input. "
"Note that dropout1d exists to provide channel-wise dropout on inputs with 1 "
"spatial dimension, a channel dimension, and an optional batch dimension "
"(i.e. 2D or 3D inputs).")
is_batched = inp_dim == 3
if not is_batched:
input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)
result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)
if not is_batched:
result = result.squeeze_(0) if inplace else result.squeeze(0)
return result
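# Illustrative sketch (shapes are assumptions and the result is random,
# F = torch.nn.functional): surviving channels are rescaled by 1 / (1 - p),
# so with an all-ones input and p=0.5 every channel of `out` comes out either
# all zeros or all 2.0.
# >>> x = torch.ones(4, 8, 16)  # (N, C, L)
# >>> out = F.dropout1d(x, p=0.5, training=True)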
def dropout2d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:
r"""
Randomly zero out entire channels (a channel is a 2D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
    batched input is a 2D tensor :math:`\text{input}[i, j]`) of the input tensor.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
See :class:`~torch.nn.Dropout2d` for details.
Args:
p: probability of a channel to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``True``
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(dropout2d, (input,), input, p=p, training=training, inplace=inplace)
if p < 0.0 or p > 1.0:
raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p))
inp_dim = input.dim()
if inp_dim not in (3, 4):
warn_msg = (f"dropout2d: Received a {inp_dim}-D input to dropout2d, which is deprecated "
"and will result in an error in a future release. To retain the behavior "
"and silence this warning, please use dropout instead. Note that dropout2d "
"exists to provide channel-wise dropout on inputs with 2 spatial dimensions, "
"a channel dimension, and an optional batch dimension (i.e. 3D or 4D inputs).")
warnings.warn(warn_msg)
# TODO: Properly support no-batch-dim inputs. For now, these are NOT supported; passing
# a 3D input will perform dropout1d behavior instead. This was done historically and the
# behavior is maintained here for now.
# See https://github.com/pytorch/pytorch/issues/77081
if inp_dim == 3:
warnings.warn("dropout2d: Received a 3D input to dropout2d and assuming that channel-wise "
"1D dropout behavior is desired - input is interpreted as shape (N, C, L), where C "
"is the channel dim. This behavior will change in a future release to interpret the "
"input as one without a batch dimension, i.e. shape (C, H, W). To maintain the 1D "
"channel-wise dropout behavior, please switch to using dropout1d instead.")
result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)
return result
def dropout3d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor:
r"""
Randomly zero out entire channels (a channel is a 3D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
    batched input is a 3D tensor :math:`\text{input}[i, j]`) of the input tensor.
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
See :class:`~torch.nn.Dropout3d` for details.
Args:
p: probability of a channel to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``True``
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(dropout3d, (input,), input, p=p, training=training, inplace=inplace)
if p < 0.0 or p > 1.0:
raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p))
inp_dim = input.dim()
if inp_dim not in (4, 5):
warn_msg = (f"dropout3d: Received a {inp_dim}-D input to dropout3d, which is deprecated "
"and will result in an error in a future release. To retain the behavior "
"and silence this warning, please use dropout instead. Note that dropout3d "
"exists to provide channel-wise dropout on inputs with 3 spatial dimensions, "
"a channel dimension, and an optional batch dimension (i.e. 4D or 5D inputs).")
warnings.warn(warn_msg)
is_batched = inp_dim == 5
if not is_batched:
input = input.unsqueeze_(0) if inplace else input.unsqueeze(0)
result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)
if not is_batched:
result = result.squeeze_(0) if inplace else result.squeeze(0)
return result
def feature_alpha_dropout(input: Tensor, p: float = 0.5, training: bool = False, inplace: bool = False) -> Tensor:
r"""
Randomly masks out entire channels (a channel is a feature map,
e.g. the :math:`j`-th channel of the :math:`i`-th sample in the batch input
    is a tensor :math:`\text{input}[i, j]`) of the input tensor. Instead of
setting activations to zero, as in regular Dropout, the activations are set
to the negative saturation value of the SELU activation function.
Each element will be masked independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
The elements to be masked are randomized on every forward call, and scaled
and shifted to maintain zero mean and unit variance.
See :class:`~torch.nn.FeatureAlphaDropout` for details.
Args:
p: dropout probability of a channel to be zeroed. Default: 0.5
        training: apply dropout if ``True``. Default: ``False``
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
"""
if has_torch_function_unary(input):
return handle_torch_function(
feature_alpha_dropout, (input,), input, p=p, training=training, inplace=inplace
)
if p < 0.0 or p > 1.0:
raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p))
return _VF.feature_alpha_dropout_(input, p, training) if inplace else _VF.feature_alpha_dropout(input, p, training)
def _threshold(input: Tensor, threshold: float, value: float, inplace: bool = False) -> Tensor:
r"""Thresholds each element of the input Tensor.
See :class:`~torch.nn.Threshold` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(_threshold, (input,), input, threshold, value, inplace=inplace)
if inplace:
result = _VF.threshold_(input, threshold, value)
else:
result = _VF.threshold(input, threshold, value)
return result
# We define this function as _threshold because it takes an argument
# named threshold, which clobbers the recursive reference to the
# function needed for __torch_function__ support
threshold = _threshold
threshold_ = _add_docstr(
_VF.threshold_,
r"""
threshold_(input, threshold, value) -> Tensor
In-place version of :func:`~threshold`.
""",
)
def relu(input: Tensor, inplace: bool = False) -> Tensor:
r"""relu(input, inplace=False) -> Tensor
Applies the rectified linear unit function element-wise. See
:class:`~torch.nn.ReLU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(relu, (input,), input, inplace=inplace)
if inplace:
result = torch.relu_(input)
else:
result = torch.relu(input)
return result
relu_ = _add_docstr(
torch.relu_,
r"""
relu_(input) -> Tensor
In-place version of :func:`~relu`.
""",
)
def glu(input: Tensor, dim: int = -1) -> Tensor:
r"""
glu(input, dim=-1) -> Tensor
The gated linear unit. Computes:
.. math ::
\text{GLU}(a, b) = a \otimes \sigma(b)
where `input` is split in half along `dim` to form `a` and `b`, :math:`\sigma`
is the sigmoid function and :math:`\otimes` is the element-wise product between matrices.
See `Language Modeling with Gated Convolutional Networks <https://arxiv.org/abs/1612.08083>`_.
Args:
input (Tensor): input tensor
dim (int): dimension on which to split the input. Default: -1
"""
if has_torch_function_unary(input):
return handle_torch_function(glu, (input,), input, dim=dim)
if input.dim() == 0:
raise RuntimeError("glu does not support scalars because halving size must be even")
return torch._C._nn.glu(input, dim)
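# Usage sketch (shapes are assumptions, F = torch.nn.functional): the input is
# chunked in two along `dim`, so that dimension is halved in the output.
# >>> x = torch.randn(4, 10)
# >>> a, b = x.chunk(2, dim=-1)
# >>> torch.allclose(F.glu(x, dim=-1), a * torch.sigmoid(b))
# True
# >>> F.glu(x, dim=-1).shape
# torch.Size([4, 5])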
def hardtanh(input: Tensor, min_val: float = -1., max_val: float = 1., inplace: bool = False) -> Tensor:
r"""
hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor
Applies the HardTanh function element-wise. See :class:`~torch.nn.Hardtanh` for more
details.
"""
if has_torch_function_unary(input):
return handle_torch_function(hardtanh, (input,), input, min_val=min_val, max_val=max_val, inplace=inplace)
if inplace:
result = torch._C._nn.hardtanh_(input, min_val, max_val)
else:
result = torch._C._nn.hardtanh(input, min_val, max_val)
return result
hardtanh_ = _add_docstr(
torch._C._nn.hardtanh_,
r"""
hardtanh_(input, min_val=-1., max_val=1.) -> Tensor
In-place version of :func:`~hardtanh`.
""",
)
def relu6(input: Tensor, inplace: bool = False) -> Tensor:
r"""relu6(input, inplace=False) -> Tensor
Applies the element-wise function :math:`\text{ReLU6}(x) = \min(\max(0,x), 6)`.
See :class:`~torch.nn.ReLU6` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(relu6, (input,), input, inplace=inplace)
if inplace:
result = torch._C._nn.relu6_(input)
else:
result = torch._C._nn.relu6(input)
return result
def elu(input: Tensor, alpha: float = 1.0, inplace: bool = False) -> Tensor:
r"""Applies the Exponential Linear Unit (ELU) function element-wise.
See :class:`~torch.nn.ELU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(elu, (input,), input, alpha=alpha, inplace=inplace)
if inplace:
result = torch._C._nn.elu_(input, alpha)
else:
result = torch._C._nn.elu(input, alpha)
return result
elu_ = _add_docstr(
torch._C._nn.elu_,
r"""
elu_(input, alpha=1.) -> Tensor
In-place version of :func:`~elu`.
""",
)
def selu(input: Tensor, inplace: bool = False) -> Tensor:
r"""selu(input, inplace=False) -> Tensor
Applies element-wise,
:math:`\text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))`,
with :math:`\alpha=1.6732632423543772848170429916717` and
:math:`scale=1.0507009873554804934193349852946`.
See :class:`~torch.nn.SELU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(selu, (input,), input, inplace=inplace)
if inplace:
result = torch.selu_(input)
else:
result = torch.selu(input)
return result
selu_ = _add_docstr(
torch.selu_,
r"""
selu_(input) -> Tensor
In-place version of :func:`~selu`.
""",
)
def celu(input: Tensor, alpha: float = 1.0, inplace: bool = False) -> Tensor:
r"""celu(input, alpha=1., inplace=False) -> Tensor
Applies element-wise,
:math:`\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))`.
See :class:`~torch.nn.CELU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(celu, (input,), input, alpha=alpha, inplace=inplace)
if inplace:
result = torch.celu_(input, alpha)
else:
result = torch.celu(input, alpha)
return result
celu_ = _add_docstr(
torch.celu_,
r"""
celu_(input, alpha=1.) -> Tensor
In-place version of :func:`~celu`.
""",
)
def leaky_relu(input: Tensor, negative_slope: float = 0.01, inplace: bool = False) -> Tensor:
r"""
leaky_relu(input, negative_slope=0.01, inplace=False) -> Tensor
Applies element-wise,
:math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`
See :class:`~torch.nn.LeakyReLU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(leaky_relu, (input,), input, negative_slope=negative_slope, inplace=inplace)
if inplace:
result = torch._C._nn.leaky_relu_(input, negative_slope)
else:
result = torch._C._nn.leaky_relu(input, negative_slope)
return result
leaky_relu_ = _add_docstr(
torch._C._nn.leaky_relu_,
r"""
leaky_relu_(input, negative_slope=0.01) -> Tensor
In-place version of :func:`~leaky_relu`.
""",
)
prelu = _add_docstr(
torch.prelu,
r"""prelu(input, weight) -> Tensor
Applies element-wise the function
:math:`\text{PReLU}(x) = \max(0,x) + \text{weight} * \min(0,x)` where weight is a
learnable parameter.
.. note::
`weight` is expected to be a scalar or 1-D tensor. If `weight` is 1-D,
its size must match the number of input channels, determined by
`input.size(1)` when `input.dim() >= 2`, otherwise 1.
In the 1-D case, note that when `input` has dim > 2, `weight` can be expanded
to the shape of `input` in a way that is not possible using normal
:ref:`broadcasting semantics<broadcasting-semantics>`.
See :class:`~torch.nn.PReLU` for more details.
""")
def rrelu(
input: Tensor, lower: float = 1.0 / 8, upper: float = 1.0 / 3, training: bool = False, inplace: bool = False
) -> Tensor:
r"""rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) -> Tensor
Randomized leaky ReLU.
See :class:`~torch.nn.RReLU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(
rrelu, (input,), input, lower=lower, upper=upper, training=training, inplace=inplace
)
if inplace:
result = torch.rrelu_(input, lower, upper, training)
else:
result = torch.rrelu(input, lower, upper, training)
return result
rrelu_ = _add_docstr(
torch.rrelu_,
r"""
rrelu_(input, lower=1./8, upper=1./3, training=False) -> Tensor
In-place version of :func:`~rrelu`.
""",
)
logsigmoid = _add_docstr(
torch._C._nn.log_sigmoid,
r"""
logsigmoid(input) -> Tensor
Applies element-wise :math:`\text{LogSigmoid}(x_i) = \log \left(\frac{1}{1 + \exp(-x_i)}\right)`
See :class:`~torch.nn.LogSigmoid` for more details.
""",
)
gelu = _add_docstr(
torch._C._nn.gelu,
r"""
gelu(input, approximate='none') -> Tensor
When the approximate argument is 'none', it applies element-wise the function
:math:`\text{GELU}(x) = x * \Phi(x)`
where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
When the approximate argument is 'tanh', Gelu is estimated with:
:math:`\text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))`
See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
""")
hardshrink = _add_docstr(
torch.hardshrink,
r"""
hardshrink(input, lambd=0.5) -> Tensor
Applies the hard shrinkage function element-wise
See :class:`~torch.nn.Hardshrink` for more details.
""")
def tanhshrink(input):
r"""tanhshrink(input) -> Tensor
Applies element-wise, :math:`\text{Tanhshrink}(x) = x - \text{Tanh}(x)`
See :class:`~torch.nn.Tanhshrink` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(tanhshrink, (input,), input)
return input - input.tanh()
def softsign(input):
r"""softsign(input) -> Tensor
Applies element-wise, the function :math:`\text{SoftSign}(x) = \frac{x}{1 + |x|}`
See :class:`~torch.nn.Softsign` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(softsign, (input,), input)
return input / (input.abs() + 1)
softplus = _add_docstr(
torch._C._nn.softplus,
r"""
softplus(input, beta=1, threshold=20) -> Tensor
Applies element-wise, the function :math:`\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))`.
For numerical stability the implementation reverts to the linear function
when :math:`input \times \beta > threshold`.
See :class:`~torch.nn.Softplus` for more details.
""",
)
def _get_softmax_dim(name: str, ndim: int, stacklevel: int) -> int:
warnings.warn(
"Implicit dimension choice for {} has been deprecated. "
"Change the call to include dim=X as an argument.".format(name),
stacklevel=stacklevel,
)
if ndim == 0 or ndim == 1 or ndim == 3:
ret = 0
else:
ret = 1
return ret
def softmin(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor:
r"""Applies a softmin function.
Note that :math:`\text{Softmin}(x) = \text{Softmax}(-x)`. See softmax definition for mathematical formula.
See :class:`~torch.nn.Softmin` for more details.
Args:
input (Tensor): input
dim (int): A dimension along which softmin will be computed (so every slice
along dim will sum to 1).
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
          If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
"""
if has_torch_function_unary(input):
return handle_torch_function(softmin, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)
if dim is None:
dim = _get_softmax_dim("softmin", input.dim(), _stacklevel)
if dtype is None:
ret = (-input).softmax(dim)
else:
ret = (-input).softmax(dim, dtype=dtype)
return ret
def softmax(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor:
r"""Applies a softmax function.
Softmax is defined as:
:math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `[0, 1]` and sum to 1.
See :class:`~torch.nn.Softmax` for more details.
Args:
input (Tensor): input
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
          If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
.. note::
This function doesn't work directly with NLLLoss,
which expects the Log to be computed between the Softmax and itself.
Use log_softmax instead (it's faster and has better numerical properties).
"""
if has_torch_function_unary(input):
return handle_torch_function(softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)
if dim is None:
dim = _get_softmax_dim("softmax", input.dim(), _stacklevel)
if dtype is None:
ret = input.softmax(dim)
else:
ret = input.softmax(dim, dtype=dtype)
return ret
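# Usage sketch (shapes are assumptions, F = torch.nn.functional): every slice
# along `dim` becomes a probability distribution.
# >>> x = torch.randn(2, 5)
# >>> p = F.softmax(x, dim=1)
# >>> torch.allclose(p.sum(dim=1), torch.ones(2))
# True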
def gumbel_softmax(logits: Tensor, tau: float = 1, hard: bool = False, eps: float = 1e-10, dim: int = -1) -> Tensor:
r"""
Samples from the Gumbel-Softmax distribution (`Link 1`_ `Link 2`_) and optionally discretizes.
Args:
logits: `[..., num_features]` unnormalized log probabilities
tau: non-negative scalar temperature
hard: if ``True``, the returned samples will be discretized as one-hot vectors,
but will be differentiated as if it is the soft sample in autograd
dim (int): A dimension along which softmax will be computed. Default: -1.
Returns:
Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution.
If ``hard=True``, the returned samples will be one-hot, otherwise they will
be probability distributions that sum to 1 across `dim`.
.. note::
        This function is here for legacy reasons and may be removed from nn.functional in the future.
.. note::
The main trick for `hard` is to do `y_hard - y_soft.detach() + y_soft`
It achieves two things:
- makes the output value exactly one-hot
(since we add then subtract y_soft value)
- makes the gradient equal to y_soft gradient
(since we strip all other gradients)
Examples::
>>> logits = torch.randn(20, 32)
>>> # Sample soft categorical using reparametrization trick:
>>> F.gumbel_softmax(logits, tau=1, hard=False)
>>> # Sample hard categorical using "Straight-through" trick:
>>> F.gumbel_softmax(logits, tau=1, hard=True)
.. _Link 1:
https://arxiv.org/abs/1611.00712
.. _Link 2:
https://arxiv.org/abs/1611.01144
"""
if has_torch_function_unary(logits):
return handle_torch_function(gumbel_softmax, (logits,), logits, tau=tau, hard=hard, eps=eps, dim=dim)
if eps != 1e-10:
warnings.warn("`eps` parameter is deprecated and has no effect.")
gumbels = (
-torch.empty_like(logits, memory_format=torch.legacy_contiguous_format).exponential_().log()
) # ~Gumbel(0,1)
gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)
y_soft = gumbels.softmax(dim)
if hard:
# Straight through.
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
ret = y_hard - y_soft.detach() + y_soft
else:
# Reparametrization trick.
ret = y_soft
return ret
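# Sketch of the straight-through trick noted above (shapes are assumptions,
# F = torch.nn.functional): the forward value equals the one-hot `y_hard`,
# while gradients flow through `y_soft` into `logits`.
# >>> logits = torch.randn(4, 10, requires_grad=True)
# >>> y = F.gumbel_softmax(logits, tau=0.5, hard=True)
# >>> bool((y.sum(dim=-1) == 1).all())
# True
# >>> (y * torch.randn(10)).sum().backward()
# >>> logits.grad is not None
# True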
def log_softmax(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor:
r"""Applies a softmax followed by a logarithm.
While mathematically equivalent to log(softmax(x)), doing these two
operations separately is slower and numerically unstable. This function
uses an alternative formulation to compute the output and gradient correctly.
See :class:`~torch.nn.LogSoftmax` for more details.
Args:
input (Tensor): input
dim (int): A dimension along which log_softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
"""
if has_torch_function_unary(input):
return handle_torch_function(log_softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype)
if dim is None:
dim = _get_softmax_dim("log_softmax", input.dim(), _stacklevel)
if dtype is None:
ret = input.log_softmax(dim)
else:
ret = input.log_softmax(dim, dtype=dtype)
return ret
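# Usage sketch (values are assumptions, F = torch.nn.functional): equivalent to
# subtracting the log-sum-exp, but computed in a fused, numerically stable way
# even for large logits.
# >>> x = torch.randn(3, 4) * 50
# >>> torch.allclose(F.log_softmax(x, dim=1), x - x.logsumexp(dim=1, keepdim=True))
# True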
softshrink = _add_docstr(
torch._C._nn.softshrink,
r"""
softshrink(input, lambd=0.5) -> Tensor
Applies the soft shrinkage function elementwise
See :class:`~torch.nn.Softshrink` for more details.
""",
)
def tanh(input):
r"""tanh(input) -> Tensor
Applies element-wise,
:math:`\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)}{\exp(x) + \exp(-x)}`
See :class:`~torch.nn.Tanh` for more details.
"""
warnings.warn("nn.functional.tanh is deprecated. Use torch.tanh instead.")
return input.tanh()
def sigmoid(input):
r"""sigmoid(input) -> Tensor
Applies the element-wise function :math:`\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}`
See :class:`~torch.nn.Sigmoid` for more details.
"""
warnings.warn("nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.")
return input.sigmoid()
def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:
r"""Applies the element-wise function
.. math::
\text{Hardsigmoid}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
1 & \text{if~} x \ge +3, \\
x / 6 + 1 / 2 & \text{otherwise}
\end{cases}
Args:
inplace: If set to ``True``, will do this operation in-place. Default: ``False``
See :class:`~torch.nn.Hardsigmoid` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(hardsigmoid, (input,), input, inplace=inplace)
if inplace:
return torch._C._nn.hardsigmoid_(input)
return torch._C._nn.hardsigmoid(input)
linear = _add_docstr(
torch._C._nn.linear,
r"""
linear(input, weight, bias=None) -> Tensor
Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
This operator supports :ref:`TensorFloat32<tf32_on_ampere>`.
Shape:
- Input: :math:`(*, in\_features)` where `*` means any number of
additional dimensions, including none
- Weight: :math:`(out\_features, in\_features)` or :math:`(in\_features)`
- Bias: :math:`(out\_features)` or :math:`()`
- Output: :math:`(*, out\_features)` or :math:`(*)`, based on the shape of the weight
""")
bilinear = _add_docstr(
torch.bilinear,
r"""
bilinear(input1, input2, weight, bias=None) -> Tensor
Applies a bilinear transformation to the incoming data:
:math:`y = x_1^T A x_2 + b`
Shape:
- input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\text{in1\_features}`
and :math:`*` means any number of additional dimensions.
All but the last dimension of the inputs should be the same.
- input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\text{in2\_features}`
- weight: :math:`(\text{out\_features}, \text{in1\_features},
\text{in2\_features})`
- bias: :math:`(\text{out\_features})`
- output: :math:`(N, *, H_{out})` where :math:`H_{out}=\text{out\_features}`
and all but the last dimension are the same shape as the input.
""")
def silu(input: Tensor, inplace: bool = False) -> Tensor:
r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise.
The SiLU function is also known as the swish function.
.. math::
\text{silu}(x) = x * \sigma(x), \text{where } \sigma(x) \text{ is the logistic sigmoid.}
.. note::
See `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_
where the SiLU (Sigmoid Linear Unit) was originally coined, and see
`Sigmoid-Weighted Linear Units for Neural Network Function Approximation
in Reinforcement Learning <https://arxiv.org/abs/1702.03118>`_ and `Swish:
a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941v1>`_
where the SiLU was experimented with later.
See :class:`~torch.nn.SiLU` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(silu, (input,), input, inplace=inplace)
if inplace:
return torch._C._nn.silu_(input)
return torch._C._nn.silu(input)
def mish(input: Tensor, inplace: bool = False) -> Tensor:
r"""Applies the Mish function, element-wise.
Mish: A Self Regularized Non-Monotonic Neural Activation Function.
.. math::
\text{Mish}(x) = x * \text{Tanh}(\text{Softplus}(x))
.. note::
See `Mish: A Self Regularized Non-Monotonic Neural Activation Function <https://arxiv.org/abs/1908.08681>`_
See :class:`~torch.nn.Mish` for more details.
"""
if has_torch_function_unary(input):
return handle_torch_function(mish, (input,), input, inplace=inplace)
if inplace:
return torch._C._nn.mish_(input)
return torch._C._nn.mish(input)
def hardswish(input: Tensor, inplace: bool = False) -> Tensor:
r"""Applies the hardswish function, element-wise, as described in the paper:
`Searching for MobileNetV3`_.
.. math::
\text{Hardswish}(x) = \begin{cases}
0 & \text{if~} x \le -3, \\
x & \text{if~} x \ge +3, \\
x \cdot (x + 3) /6 & \text{otherwise}
\end{cases}
See :class:`~torch.nn.Hardswish` for more details.
.. _`Searching for MobileNetV3`:
https://arxiv.org/abs/1905.02244
"""
if has_torch_function_unary(input):
return handle_torch_function(hardswish, (input,), input, inplace=inplace)
if inplace:
return torch._C._nn.hardswish_(input)
return torch._C._nn.hardswish(input)
def _no_grad_embedding_renorm_(weight: Tensor, input: Tensor, max_norm: float, norm_type: float) -> None:
torch.embedding_renorm_(weight.detach(), input, max_norm, norm_type)
def embedding(
input: Tensor,
weight: Tensor,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
) -> Tensor:
r"""A simple lookup table that looks up embeddings in a fixed dictionary and size.
This module is often used to retrieve word embeddings using indices.
The input to the module is a list of indices, and the embedding matrix,
and the output is the corresponding word embeddings.
See :class:`torch.nn.Embedding` for more details.
Args:
input (LongTensor): Tensor containing indices into the embedding matrix
weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1,
and number of columns equal to the embedding size
padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
i.e. it remains as a fixed "pad".
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
Note: this will modify :attr:`weight` in-place.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under
:class:`torch.nn.Embedding` for more details regarding sparse gradients.
Shape:
- Input: LongTensor of arbitrary shape containing the indices to extract
- Weight: Embedding matrix of floating point type with shape `(V, embedding_dim)`,
where V = maximum index + 1 and embedding_dim = the embedding size
- Output: `(*, embedding_dim)`, where `*` is the input shape
Examples::
>>> # a batch of 2 samples of 4 indices each
>>> input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
>>> # an embedding matrix containing 10 tensors of size 3
>>> embedding_matrix = torch.rand(10, 3)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> F.embedding(input, embedding_matrix)
tensor([[[ 0.8490, 0.9625, 0.6753],
[ 0.9666, 0.7761, 0.6108],
[ 0.6246, 0.9751, 0.3618],
[ 0.4161, 0.2419, 0.7383]],
[[ 0.6246, 0.9751, 0.3618],
[ 0.0237, 0.7794, 0.0528],
[ 0.9666, 0.7761, 0.6108],
[ 0.3385, 0.8612, 0.1867]]])
>>> # example with padding_idx
>>> weights = torch.rand(10, 3)
>>> weights[0, :].zero_()
>>> embedding_matrix = weights
>>> input = torch.tensor([[0,2,0,5]])
>>> F.embedding(input, embedding_matrix, padding_idx=0)
tensor([[[ 0.0000, 0.0000, 0.0000],
[ 0.5609, 0.5384, 0.8720],
[ 0.0000, 0.0000, 0.0000],
[ 0.6262, 0.2438, 0.7471]]])
"""
if has_torch_function_variadic(input, weight):
return handle_torch_function(
embedding,
(input, weight),
input,
weight,
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse,
)
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < weight.size(0), "Padding_idx must be within num_embeddings"
elif padding_idx < 0:
assert padding_idx >= -weight.size(0), "Padding_idx must be within num_embeddings"
padding_idx = weight.size(0) + padding_idx
else:
padding_idx = -1
if max_norm is not None:
# Note [embedding_renorm contiguous]
# `embedding_renorm_` will call .contiguous() on input anyways, so we
# call it here and take advantage of the improved locality in the
# `embedding` call below too.
input = input.contiguous()
# Note [embedding_renorm set_grad_enabled]
# XXX: equivalent to
# with torch.no_grad():
# torch.embedding_renorm_
# remove once script supports set_grad_enabled
_no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
def embedding_bag(
input: Tensor,
weight: Tensor,
offsets: Optional[Tensor] = None,
max_norm: Optional[float] = None,
norm_type: float = 2,
scale_grad_by_freq: bool = False,
mode: str = "mean",
sparse: bool = False,
per_sample_weights: Optional[Tensor] = None,
include_last_offset: bool = False,
padding_idx: Optional[int] = None,
) -> Tensor:
r"""Computes sums, means or maxes of `bags` of embeddings, without instantiating the
intermediate embeddings.
See :class:`torch.nn.EmbeddingBag` for more details.
Note:
{backward_reproducibility_note}
Args:
input (LongTensor): Tensor containing bags of indices into the embedding matrix
weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1,
and number of columns equal to the embedding size
offsets (LongTensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines
the starting index position of each bag (sequence) in :attr:`input`.
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
Note: this will modify :attr:`weight` in-place.
norm_type (float, optional): The ``p`` in the ``p``-norm to compute for the :attr:`max_norm` option.
Default ``2``.
scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
Note: this option is not supported when ``mode="max"``.
mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
Default: ``"mean"``
sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under
:class:`torch.nn.Embedding` for more details regarding sparse gradients.
Note: this option is not supported when ``mode="max"``.
per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
to indicate all weights should be taken to be 1. If specified, :attr:`per_sample_weights`
must have exactly the same shape as input and is treated as having the same
:attr:`offsets`, if those are not None.
include_last_offset (bool, optional): if ``True``, the size of offsets is equal to the number of bags + 1.
The last element is the size of the input, or the ending index position of the last bag (sequence).
padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the
gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated
during training, i.e. it remains as a fixed "pad". Note that the embedding
vector at :attr:`padding_idx` is excluded from the reduction.
Shape:
- :attr:`input` (LongTensor) and :attr:`offsets` (LongTensor, optional)
- If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences)
each of fixed length ``N``, and this will return ``B`` values aggregated in a way
depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case.
- If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of
multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing
the starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets`
of shape `(B)`, :attr:`input` will be viewed as having ``B`` bags.
Empty bags (i.e., having 0-length) will have returned vectors filled by zeros.
- :attr:`weight` (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)`
- :attr:`per_sample_weights` (Tensor, optional). Has the same shape as :attr:`input`.
- :attr:`output`: aggregated embedding values of shape `(B, embedding_dim)`
Examples::
>>> # an Embedding module containing 10 tensors of size 3
>>> embedding_matrix = torch.rand(10, 3)
>>> # a batch of 2 samples of 4 indices each
>>> input = torch.tensor([1,2,4,5,4,3,2,9])
>>> offsets = torch.tensor([0,4])
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> F.embedding_bag(input, embedding_matrix, offsets)
tensor([[ 0.3397, 0.3552, 0.5545],
[ 0.5893, 0.4386, 0.5882]])
>>> # example with padding_idx
>>> embedding_matrix = torch.rand(10, 3)
>>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9])
>>> offsets = torch.tensor([0,4])
>>> F.embedding_bag(input, embedding_matrix, offsets, padding_idx=2, mode='sum')
tensor([[ 0.0000, 0.0000, 0.0000],
[-0.7082, 3.2145, -2.6251]])
"""
if has_torch_function_variadic(input, weight, offsets, per_sample_weights):
return handle_torch_function(
embedding_bag,
(input, weight, offsets, per_sample_weights),
input,
weight,
offsets=offsets,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
mode=mode,
sparse=sparse,
per_sample_weights=per_sample_weights,
include_last_offset=include_last_offset,
padding_idx=padding_idx,
)
# Check for backward compatibility.
# Used to be embedding_bag(weight, input, ...)
# Now is embedding_bag(input, weight, ...)
if weight.dtype == torch.long and input.is_floating_point():
warnings.warn(
"Argument order of nn.functional.embedding_bag was changed. "
"Usage `embedding_bag(weight, input, ...)` is deprecated, "
"and should now be `embedding_bag(input, weight, ...)`."
)
weight, input = input, weight
if per_sample_weights is not None and input.size() != per_sample_weights.size():
raise ValueError(
"embedding_bag: If per_sample_weights ({}) is not None, "
"then it must have the same shape as the input ({})".format(per_sample_weights.shape, input.shape)
)
if input.dim() == 2:
if offsets is not None:
type_str = "<unknown>"
# TODO: Remove this once script supports type() calls
if not torch.jit.is_scripting():
type_str = str(type(offsets))
raise ValueError(
"if input is 2D, then offsets has to be None"
", as input is treated is a mini-batch of"
" fixed length sequences. However, found "
"offsets of type {}".format(type_str)
)
offsets = torch.arange(0, input.numel(), input.size(1), dtype=input.dtype, device=input.device)
input = input.reshape(-1)
if per_sample_weights is not None:
per_sample_weights = per_sample_weights.reshape(-1)
elif input.dim() == 1:
if offsets is None:
raise ValueError("offsets has to be a 1D Tensor but got None")
if offsets.dim() != 1:
raise ValueError("offsets has to be a 1D Tensor")
else:
raise ValueError("input has to be 1D or 2D Tensor," " but got Tensor of dimension {}".format(input.dim()))
if mode == "sum":
mode_enum = 0
elif mode == "mean":
mode_enum = 1
elif mode == "max":
mode_enum = 2
if scale_grad_by_freq:
raise ValueError("max mode does not support scaling the gradient by the frequency")
if sparse:
raise ValueError("max mode does not support sparse weights")
else:
raise ValueError("mode has to be one of sum, mean or max")
if max_norm is not None:
# XXX: equivalent to
# with torch.no_grad():
        # torch.embedding_renorm_
# remove once script supports set_grad_enabled
_no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
if per_sample_weights is not None and mode != "sum":
raise NotImplementedError(
"embedding_bag: per_sample_weights was not None. "
"per_sample_weights is only supported for mode='sum' "
"(got mode='{}'). Please open a feature request on GitHub.".format(mode)
)
ret, _, _, _ = torch.embedding_bag(
weight, input, offsets, scale_grad_by_freq, mode_enum, sparse, per_sample_weights, include_last_offset, padding_idx
)
return ret
if embedding_bag.__doc__:
embedding_bag.__doc__ = embedding_bag.__doc__.format(**reproducibility_notes)
def _verify_batch_size(size: List[int]) -> None:
# XXX: JIT script does not support the reduce from functools, and mul op is a
# builtin, which cannot be used as a value to a func yet, so rewrite this size
# check to a simple equivalent for loop
#
# TODO: make use of reduce like below when JIT is ready with the missing features:
# from operator import mul
# from functools import reduce
#
# if reduce(mul, size[2:], size[0]) == 1
size_prods = size[0]
for i in range(len(size) - 2):
size_prods *= size[i + 2]
if size_prods == 1:
raise ValueError("Expected more than 1 value per channel when training, got input size {}".format(size))
def batch_norm(
input: Tensor,
running_mean: Optional[Tensor],
running_var: Optional[Tensor],
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
training: bool = False,
momentum: float = 0.1,
eps: float = 1e-5,
) -> Tensor:
r"""Applies Batch Normalization for each channel across a batch of data.
See :class:`~torch.nn.BatchNorm1d`, :class:`~torch.nn.BatchNorm2d`,
:class:`~torch.nn.BatchNorm3d` for details.
"""
if has_torch_function_variadic(input, running_mean, running_var, weight, bias):
return handle_torch_function(
batch_norm,
(input, running_mean, running_var, weight, bias),
input,
running_mean,
running_var,
weight=weight,
bias=bias,
training=training,
momentum=momentum,
eps=eps,
)
if training:
_verify_batch_size(input.size())
return torch.batch_norm(
input, weight, bias, running_mean, running_var, training, momentum, eps, torch.backends.cudnn.enabled
)
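# Usage sketch (shapes are assumptions, F = torch.nn.functional): with
# ``training=False`` the supplied running statistics are used as-is; with
# ``training=True`` they are updated in-place using ``momentum``.
# >>> x = torch.randn(8, 3, 4, 4)
# >>> running_mean, running_var = torch.zeros(3), torch.ones(3)
# >>> out = F.batch_norm(x, running_mean, running_var, training=False)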
def _verify_spatial_size(size: List[int]) -> None:
# Verify that there is > 1 spatial element for instance norm calculation.
size_prods = 1
for i in range(2, len(size)):
size_prods *= size[i]
if size_prods == 1:
raise ValueError("Expected more than 1 spatial element when training, got input size {}".format(size))
def instance_norm(
input: Tensor,
running_mean: Optional[Tensor] = None,
running_var: Optional[Tensor] = None,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
use_input_stats: bool = True,
momentum: float = 0.1,
eps: float = 1e-5,
) -> Tensor:
r"""Applies Instance Normalization for each channel in each data sample in a
batch.
See :class:`~torch.nn.InstanceNorm1d`, :class:`~torch.nn.InstanceNorm2d`,
:class:`~torch.nn.InstanceNorm3d` for details.
"""
if has_torch_function_variadic(input, running_mean, running_var, weight, bias):
return handle_torch_function(
instance_norm,
(input, running_mean, running_var, weight, bias),
input,
running_mean=running_mean,
running_var=running_var,
weight=weight,
bias=bias,
use_input_stats=use_input_stats,
momentum=momentum,
eps=eps,
)
if use_input_stats:
_verify_spatial_size(input.size())
return torch.instance_norm(
input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, torch.backends.cudnn.enabled
)
def layer_norm(
input: Tensor,
normalized_shape: List[int],
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
eps: float = 1e-5,
) -> Tensor:
r"""Applies Layer Normalization for last certain number of dimensions.
See :class:`~torch.nn.LayerNorm` for details.
"""
if has_torch_function_variadic(input, weight, bias):
return handle_torch_function(
layer_norm, (input, weight, bias), input, normalized_shape, weight=weight, bias=bias, eps=eps
)
return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)
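# Sketch (shapes and tolerance are assumptions, F = torch.nn.functional):
# without affine parameters this is a standardization over the trailing
# `normalized_shape` dimensions using the biased variance and `eps` inside the
# square root.
# >>> x = torch.randn(2, 5, 10)
# >>> y = F.layer_norm(x, [10])
# >>> mu = x.mean(-1, keepdim=True)
# >>> var = x.var(-1, unbiased=False, keepdim=True)
# >>> torch.allclose(y, (x - mu) / torch.sqrt(var + 1e-5), atol=1e-5)
# True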
def group_norm(
input: Tensor, num_groups: int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: float = 1e-5
) -> Tensor:
r"""Applies Group Normalization for last certain number of dimensions.
See :class:`~torch.nn.GroupNorm` for details.
"""
if has_torch_function_variadic(input, weight, bias):
return handle_torch_function(group_norm, (input, weight, bias,), input, num_groups, weight=weight, bias=bias, eps=eps)
_verify_batch_size([input.size(0) * input.size(1) // num_groups, num_groups] + list(input.size()[2:]))
return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled)
def local_response_norm(input: Tensor, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1.0) -> Tensor:
r"""Applies local response normalization over an input signal composed of
several input planes, where channels occupy the second dimension.
Applies normalization across channels.
See :class:`~torch.nn.LocalResponseNorm` for details.
"""
if has_torch_function_unary(input):
return handle_torch_function(local_response_norm, (input,), input, size, alpha=alpha, beta=beta, k=k)
dim = input.dim()
if dim < 3:
raise ValueError(
"Expected 3D or higher dimensionality \
input (got {} dimensions)".format(
dim
)
)
if input.numel() == 0:
return input
div = input.mul(input).unsqueeze(1)
if dim == 3:
div = pad(div, (0, 0, size // 2, (size - 1) // 2))
div = avg_pool2d(div, (size, 1), stride=1).squeeze(1)
else:
sizes = input.size()
div = div.view(sizes[0], 1, sizes[1], sizes[2], -1)
div = pad(div, (0, 0, 0, 0, size // 2, (size - 1) // 2))
div = avg_pool3d(div, (size, 1, 1), stride=1).squeeze(1)
div = div.view(sizes)
div = div.mul(alpha).add(k).pow(beta)
return input / div
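# Usage sketch (shapes are assumptions, F = torch.nn.functional): the averaging
# above runs over a window of `size` neighbouring channels, and the output
# keeps the input shape.
# >>> x = torch.randn(2, 16, 8, 8)
# >>> F.local_response_norm(x, size=5).shape
# torch.Size([2, 16, 8, 8])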
# loss
def ctc_loss(
log_probs: Tensor,
targets: Tensor,
input_lengths: Tensor,
target_lengths: Tensor,
blank: int = 0,
reduction: str = "mean",
zero_infinity: bool = False,
) -> Tensor:
r"""The Connectionist Temporal Classification loss.
See :class:`~torch.nn.CTCLoss` for details.
Note:
{cudnn_reproducibility_note}
Note:
{backward_reproducibility_note}
Args:
log_probs: :math:`(T, N, C)` or :math:`(T, C)` where `C = number of characters in alphabet including blank`,
`T = input length`, and `N = batch size`.
The logarithmized probabilities of the outputs
(e.g. obtained with :func:`torch.nn.functional.log_softmax`).
targets: :math:`(N, S)` or `(sum(target_lengths))`.
Targets cannot be blank. In the second form, the targets are assumed to be concatenated.
input_lengths: :math:`(N)` or :math:`()`.
Lengths of the inputs (must each be :math:`\leq T`)
target_lengths: :math:`(N)` or :math:`()`.
Lengths of the targets
blank (int, optional):
Blank label. Default :math:`0`.
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the output losses will be divided by the target lengths and
then the mean over the batch is taken, ``'sum'``: the output will be
summed. Default: ``'mean'``
zero_infinity (bool, optional):
Whether to zero infinite losses and the associated gradients.
Default: ``False``
Infinite losses mainly occur when the inputs are too short
to be aligned to the targets.
Example::
>>> log_probs = torch.randn(50, 16, 20).log_softmax(2).detach().requires_grad_()
>>> targets = torch.randint(1, 20, (16, 30), dtype=torch.long)
>>> input_lengths = torch.full((16,), 50, dtype=torch.long)
>>> target_lengths = torch.randint(10,30,(16,), dtype=torch.long)
>>> loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)
>>> loss.backward()
"""
if has_torch_function_variadic(log_probs, targets, input_lengths, target_lengths):
return handle_torch_function(
ctc_loss,
(log_probs, targets, input_lengths, target_lengths),
log_probs, targets, input_lengths, target_lengths,
blank=blank, reduction=reduction, zero_infinity=zero_infinity
)
return torch.ctc_loss(
log_probs, targets, input_lengths, target_lengths, blank, _Reduction.get_enum(reduction), zero_infinity
)
if ctc_loss.__doc__:
ctc_loss.__doc__ = ctc_loss.__doc__.format(**reproducibility_notes)
def nll_loss(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
ignore_index: int = -100,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""The negative log likelihood loss.
See :class:`~torch.nn.NLLLoss` for details.
Args:
input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
            in the case of 2D loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K \geq 1`
in the case of K-dimensional loss. `input` is expected to be log-probabilities.
target: :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`,
or :math:`(N, d_1, d_2, ..., d_K)` where :math:`K \geq 1` for
K-dimensional loss.
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When :attr:`size_average` is
``True``, the loss is averaged over non-ignored targets. Default: -100
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Example::
>>> # input is of size N x C = 3 x 5
>>> input = torch.randn(3, 5, requires_grad=True)
>>> # each element in target has to have 0 <= value < C
>>> target = torch.tensor([1, 0, 4])
>>> output = F.nll_loss(F.log_softmax(input), target)
>>> output.backward()
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
nll_loss,
(input, target, weight),
input,
target,
weight=weight,
size_average=size_average,
ignore_index=ignore_index,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
return torch._C._nn.nll_loss_nd(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
def poisson_nll_loss(
input: Tensor,
target: Tensor,
log_input: bool = True,
full: bool = False,
size_average: Optional[bool] = None,
eps: float = 1e-8,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""Poisson negative log likelihood loss.
See :class:`~torch.nn.PoissonNLLLoss` for details.
Args:
input: expectation of underlying Poisson distribution.
target: random sample :math:`target \sim \text{Poisson}(input)`.
log_input: if ``True`` the loss is computed as
:math:`\exp(\text{input}) - \text{target} * \text{input}`, if ``False`` then loss is
:math:`\text{input} - \text{target} * \log(\text{input}+\text{eps})`. Default: ``True``
full: whether to compute full loss, i.e. to add the Stirling
approximation term
:math:`\text{target} * \log(\text{target}) - \text{target} + 0.5 * \log(2 * \pi * \text{target})`.
Default: ``False``
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
eps (float, optional): Small value to avoid evaluation of :math:`\log(0)` when
:attr:`log_input`\ =\ ``False``. Default: 1e-8
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
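Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes are arbitrary)::
>>> input = torch.randn(5, 2, requires_grad=True)  # log-rates by default (log_input=True)
>>> target = torch.poisson(torch.rand(5, 2) * 5)
>>> loss = F.poisson_nll_loss(input, target)
>>> loss.backward()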
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
poisson_nll_loss,
(input, target),
input,
target,
log_input=log_input,
full=full,
size_average=size_average,
eps=eps,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
if reduction != "none" and reduction != "mean" and reduction != "sum":
ret = input
raise ValueError(reduction + " is not valid")
ret = torch.poisson_nll_loss(input, target, log_input, full, eps, _Reduction.get_enum(reduction))
return ret
def gaussian_nll_loss(
input: Tensor,
target: Tensor,
var: Tensor,
full: bool = False,
eps: float = 1e-6,
reduction: str = "mean",
) -> Tensor:
r"""Gaussian negative log likelihood loss.
See :class:`~torch.nn.GaussianNLLLoss` for details.
Args:
input: expectation of the Gaussian distribution.
target: sample from the Gaussian distribution.
var: tensor of positive variance(s), one for each of the expectations
in the input (heteroscedastic), or a single one (homoscedastic).
full (bool, optional): include the constant term in the loss calculation. Default: ``False``.
eps (float, optional): value added to var, for stability. Default: 1e-6.
reduction (str, optional): specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the output is the average of all batch member losses,
``'sum'``: the output is the sum of all batch member losses.
Default: ``'mean'``.
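Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes are arbitrary)::
>>> input = torch.randn(5, 2, requires_grad=True)
>>> target = torch.randn(5, 2)
>>> var = torch.ones(5, 2, requires_grad=True)  # heteroscedastic: one variance per expectation
>>> loss = F.gaussian_nll_loss(input, target, var)
>>> loss.backward()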
"""
if has_torch_function_variadic(input, target, var):
return handle_torch_function(
gaussian_nll_loss,
(input, target, var),
input,
target,
var,
full=full,
eps=eps,
reduction=reduction,
)
# Check var size
# If var.size == input.size, the case is heteroscedastic and no further checks are needed.
# Otherwise:
if var.size() != input.size():
# If var is one dimension short of input, but the sizes match otherwise, then this is a homoscedastic case.
# e.g. input.size = (10, 2, 3), var.size = (10, 2)
# -> unsqueeze var so that var.shape = (10, 2, 1)
# this is done so that broadcasting can happen in the loss calculation
if input.size()[:-1] == var.size():
var = torch.unsqueeze(var, -1)
# This checks if the sizes match up to the final dimension, and the final dimension of var is of size 1.
# This is also a homoscedastic case.
# e.g. input.size = (10, 2, 3), var.size = (10, 2, 1)
elif input.size()[:-1] == var.size()[:-1] and var.size(-1) == 1: # Homoscedastic case
pass
# If none of the above pass, then the size of var is incorrect.
else:
raise ValueError("var is of incorrect size")
# Check validity of reduction mode
if reduction != 'none' and reduction != 'mean' and reduction != 'sum':
raise ValueError(reduction + " is not valid")
# Entries of var must be non-negative
if torch.any(var < 0):
raise ValueError("var has negative entry/entries")
# Clamp for stability
var = var.clone()
with torch.no_grad():
var.clamp_(min=eps)
# Calculate the loss
loss = 0.5 * (torch.log(var) + (input - target)**2 / var)
if full:
loss += 0.5 * math.log(2 * math.pi)
if reduction == 'mean':
return loss.mean()
elif reduction == 'sum':
return loss.sum()
else:
return loss
def kl_div(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
log_target: bool = False,
) -> Tensor:
r"""The `Kullback-Leibler divergence Loss
<https://en.wikipedia.org/wiki/Kullback-Leibler_divergence>`__
See :class:`~torch.nn.KLDivLoss` for details.
Args:
input: Tensor of arbitrary shape in log-probabilities.
target: Tensor of the same shape as input. See :attr:`log_target` for
the target's interpretation.
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``.
``'none'``: no reduction will be applied
``'batchmean'``: the sum of the output will be divided by the batchsize
``'sum'``: the output will be summed
``'mean'``: the output will be divided by the number of elements in the output
Default: ``'mean'``
log_target (bool): A flag indicating whether ``target`` is passed in the log space.
It is recommended to pass certain distributions (such as the output of ``softmax``)
in the log space to avoid numerical issues caused by explicit ``log``.
Default: ``False``
.. note::
:attr:`size_average` and :attr:`reduce` are in the process of being deprecated,
and in the meantime, specifying either of those two args will override :attr:`reduction`.
.. note::
:attr:`reduction` = ``'mean'`` doesn't return the true kl divergence value, please use
:attr:`reduction` = ``'batchmean'`` which aligns with KL math definition.
In the next major release, ``'mean'`` will be changed to be the same as ``'batchmean'``.
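Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes are arbitrary)::
>>> logits = torch.randn(3, 5, requires_grad=True)
>>> input = F.log_softmax(logits, dim=1)  # input must be log-probabilities
>>> target = F.softmax(torch.randn(3, 5), dim=1)
>>> loss = F.kl_div(input, target, reduction='batchmean')
>>> loss.backward()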
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
kl_div,
(input, target),
input,
target,
size_average=size_average,
reduce=reduce,
reduction=reduction,
log_target=log_target,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
if reduction == "mean":
warnings.warn(
"reduction: 'mean' divides the total loss by both the batch size and the support size."
"'batchmean' divides only by the batch size, and aligns with the KL div math definition."
"'mean' will be changed to behave the same as 'batchmean' in the next major release."
)
# special case for batchmean
if reduction == "batchmean":
reduction_enum = _Reduction.get_enum("sum")
else:
reduction_enum = _Reduction.get_enum(reduction)
reduced = torch.kl_div(input, target, reduction_enum, log_target=log_target)
if reduction == "batchmean" and input.dim() != 0:
reduced = reduced / input.size()[0]
return reduced
def cross_entropy(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
ignore_index: int = -100,
reduce: Optional[bool] = None,
reduction: str = "mean",
label_smoothing: float = 0.0,
) -> Tensor:
r"""This criterion computes the cross entropy loss between input logits and target.
See :class:`~torch.nn.CrossEntropyLoss` for details.
Args:
input (Tensor) : Predicted unnormalized logits;
see Shape section below for supported shapes.
target (Tensor) : Ground truth class indices or class probabilities;
see Shape section below for supported shapes.
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When :attr:`size_average` is
``True``, the loss is averaged over non-ignored targets. Note that
:attr:`ignore_index` is only applicable when the target contains class indices.
Default: -100
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
label_smoothing (float, optional): A float in [0.0, 1.0]. Specifies the amount
of smoothing when computing the loss, where 0.0 means no smoothing. The targets
become a mixture of the original ground truth and a uniform distribution as described in
`Rethinking the Inception Architecture for Computer Vision <https://arxiv.org/abs/1512.00567>`__. Default: :math:`0.0`.
Shape:
- Input: Shape :math:`(C)`, :math:`(N, C)` or :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
in the case of `K`-dimensional loss.
- Target: If containing class indices, shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with
:math:`K \geq 1` in the case of K-dimensional loss where each value should be between :math:`[0, C)`.
If containing class probabilities, same shape as the input and each value should be between :math:`[0, 1]`.
where:
.. math::
\begin{aligned}
C ={} & \text{number of classes} \\
N ={} & \text{batch size} \\
\end{aligned}
Examples::
>>> # Example of target with class indices
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randint(5, (3,), dtype=torch.int64)
>>> loss = F.cross_entropy(input, target)
>>> loss.backward()
>>>
>>> # Example of target with class probabilities
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5).softmax(dim=1)
>>> loss = F.cross_entropy(input, target)
>>> loss.backward()
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
cross_entropy,
(input, target, weight),
input,
target,
weight=weight,
size_average=size_average,
ignore_index=ignore_index,
reduce=reduce,
reduction=reduction,
label_smoothing=label_smoothing,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
def binary_cross_entropy(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""Function that measures the Binary Cross Entropy between the target and input
probabilities.
See :class:`~torch.nn.BCELoss` for details.
Args:
input: Tensor of arbitrary shape as probabilities.
target: Tensor of the same shape as input with values between 0 and 1.
weight (Tensor, optional): a manual rescaling weight
if provided it's repeated to match input tensor shape
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Examples::
>>> input = torch.randn(3, 2, requires_grad=True)
>>> target = torch.rand(3, 2, requires_grad=False)
>>> loss = F.binary_cross_entropy(torch.sigmoid(input), target)
>>> loss.backward()
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
binary_cross_entropy,
(input, target, weight),
input,
target,
weight=weight,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
if target.size() != input.size():
raise ValueError(
"Using a target size ({}) that is different to the input size ({}) is deprecated. "
"Please ensure they have the same size.".format(target.size(), input.size())
)
if weight is not None:
new_size = _infer_size(target.size(), weight.size())
weight = weight.expand(new_size)
return torch._C._nn.binary_cross_entropy(input, target, weight, reduction_enum)
def binary_cross_entropy_with_logits(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
pos_weight: Optional[Tensor] = None,
) -> Tensor:
r"""Function that measures Binary Cross Entropy between target and input
logits.
See :class:`~torch.nn.BCEWithLogitsLoss` for details.
Args:
input: Tensor of arbitrary shape as unnormalized scores (often referred to as logits).
target: Tensor of the same shape as input with values between 0 and 1
weight (Tensor, optional): a manual rescaling weight
if provided it's repeated to match input tensor shape
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when reduce is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
pos_weight (Tensor, optional): a weight of positive examples.
Must be a vector with length equal to the number of classes.
Examples::
>>> input = torch.randn(3, requires_grad=True)
>>> target = torch.empty(3).random_(2)
>>> loss = F.binary_cross_entropy_with_logits(input, target)
>>> loss.backward()
"""
if has_torch_function_variadic(input, target, weight, pos_weight):
return handle_torch_function(
binary_cross_entropy_with_logits,
(input, target, weight, pos_weight),
input,
target,
weight=weight,
size_average=size_average,
reduce=reduce,
reduction=reduction,
pos_weight=pos_weight,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum)
def smooth_l1_loss(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
beta: float = 1.0,
) -> Tensor:
r"""Function that uses a squared term if the absolute
element-wise error falls below beta and an L1 term otherwise.
See :class:`~torch.nn.SmoothL1Loss` for details.
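Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes are arbitrary)::
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5)
>>> loss = F.smooth_l1_loss(input, target)
>>> loss.backward()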
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
smooth_l1_loss,
(input, target),
input,
target,
size_average=size_average,
reduce=reduce,
reduction=reduction,
beta=beta,
)
if not (target.size() == input.size()):
warnings.warn(
"Using a target size ({}) that is different to the input size ({}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.".format(target.size(), input.size()),
stacklevel=2,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
return torch._C._nn.smooth_l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), beta)
def huber_loss(
input: Tensor,
target: Tensor,
reduction: str = 'mean',
delta: float = 1.0,
) -> Tensor:
r"""Function that uses a squared term if the absolute
element-wise error falls below delta and a delta-scaled L1 term otherwise.
See :class:`~torch.nn.HuberLoss` for details.
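Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes are arbitrary)::
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5)
>>> loss = F.huber_loss(input, target, delta=1.0)
>>> loss.backward()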
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
huber_loss,
(input, target),
input,
target,
reduction=reduction,
delta=delta,
)
if not (target.size() == input.size()):
warnings.warn("Using a target size ({}) that is different to the input size ({}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.".format(target.size(), input.size()),
stacklevel=2)
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
return torch._C._nn.huber_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), delta)
def l1_loss(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
Function that takes the mean element-wise absolute value difference.
See :class:`~torch.nn.L1Loss` for details.
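Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes are arbitrary)::
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5)
>>> loss = F.l1_loss(input, target)
>>> loss.backward()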
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
l1_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction
)
if not (target.size() == input.size()):
warnings.warn(
"Using a target size ({}) that is different to the input size ({}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.".format(target.size(), input.size()),
stacklevel=2,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
return torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
def mse_loss(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""mse_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
Measures the element-wise mean squared error.
See :class:`~torch.nn.MSELoss` for details.
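Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes are arbitrary)::
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randn(3, 5)
>>> loss = F.mse_loss(input, target)
>>> loss.backward()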
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
mse_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction
)
if not (target.size() == input.size()):
warnings.warn(
"Using a target size ({}) that is different to the input size ({}). "
"This will likely lead to incorrect results due to broadcasting. "
"Please ensure they have the same size.".format(target.size(), input.size()),
stacklevel=2,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
expanded_input, expanded_target = torch.broadcast_tensors(input, target)
return torch._C._nn.mse_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction))
def margin_ranking_loss(
input1: Tensor,
input2: Tensor,
target: Tensor,
margin: float = 0,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""margin_ranking_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.MarginRankingLoss` for details.
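Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes and targets are arbitrary)::
>>> input1 = torch.randn(4, requires_grad=True)
>>> input2 = torch.randn(4, requires_grad=True)
>>> target = torch.tensor([1., -1., 1., -1.])  # 1 if input1 should rank higher, -1 otherwise
>>> loss = F.margin_ranking_loss(input1, input2, target)
>>> loss.backward()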
"""
if has_torch_function_variadic(input1, input2, target):
return handle_torch_function(
margin_ranking_loss,
(input1, input2, target),
input1,
input2,
target,
margin=margin,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
if (input1.dim() != input2.dim() or input1.dim() != target.dim()):
raise RuntimeError(
(
"margin_ranking_loss : All input tensors should have same dimension but got sizes: "
"input1: {}, input2: {}, target: {} ".format(input1.size(), input2.size(), target.size())
)
)
return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum)
def hinge_embedding_loss(
input: Tensor,
target: Tensor,
margin: float = 1.0,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""hinge_embedding_loss(input, target, margin=1.0, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.HingeEmbeddingLoss` for details.
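Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes and targets are arbitrary)::
>>> input = torch.randn(4, requires_grad=True)
>>> target = torch.tensor([1., -1., 1., -1.])  # labels must be 1 or -1
>>> loss = F.hinge_embedding_loss(input, target)
>>> loss.backward()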
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
hinge_embedding_loss,
(input, target),
input,
target,
margin=margin,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
return torch.hinge_embedding_loss(input, target, margin, reduction_enum)
def multilabel_margin_loss(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.MultiLabelMarginLoss` for details.
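Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes and targets are arbitrary)::
>>> input = torch.randn(3, 5, requires_grad=True)
>>> # each row lists the positive class indices for that sample, padded with -1
>>> target = torch.tensor([[3, 0, -1, -1, -1], [1, 4, 2, -1, -1], [0, -1, -1, -1, -1]])
>>> loss = F.multilabel_margin_loss(input, target)
>>> loss.backward()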
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
multilabel_margin_loss,
(input, target),
input,
target,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
return torch._C._nn.multilabel_margin_loss(input, target, reduction_enum)
def soft_margin_loss(
input: Tensor,
target: Tensor,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.SoftMarginLoss` for details.
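Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes and targets are arbitrary)::
>>> input = torch.randn(4, requires_grad=True)
>>> target = torch.tensor([1., -1., 1., -1.])  # labels must be 1 or -1
>>> loss = F.soft_margin_loss(input, target)
>>> loss.backward()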
"""
if has_torch_function_variadic(input, target):
return handle_torch_function(
soft_margin_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
return torch._C._nn.soft_margin_loss(input, target, reduction_enum)
def multilabel_soft_margin_loss(
input: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""multilabel_soft_margin_loss(input, target, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.MultiLabelSoftMarginLoss` for details.
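Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes and targets are arbitrary)::
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.randint(0, 2, (3, 5)).float()  # 0/1 multi-hot labels
>>> loss = F.multilabel_soft_margin_loss(input, target)
>>> loss.backward()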
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
multilabel_soft_margin_loss,
(input, target, weight),
input,
target,
weight=weight,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction = _Reduction.legacy_get_string(size_average, reduce)
loss = -(target * logsigmoid(input) + (1 - target) * logsigmoid(-input))
if weight is not None:
loss = loss * weight
class_dim = input.dim() - 1
C = input.size(class_dim)
loss = loss.sum(dim=class_dim) / C # only return N loss values
if reduction == "none":
ret = loss
elif reduction == "mean":
ret = loss.mean()
elif reduction == "sum":
ret = loss.sum()
else:
ret = input
raise ValueError(reduction + " is not valid")
return ret
def cosine_embedding_loss(
input1: Tensor,
input2: Tensor,
target: Tensor,
margin: float = 0,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""cosine_embedding_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.CosineEmbeddingLoss` for details.
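Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes and targets are arbitrary)::
>>> input1 = torch.randn(3, 5, requires_grad=True)
>>> input2 = torch.randn(3, 5, requires_grad=True)
>>> target = torch.tensor([1., -1., 1.])  # 1 to pull pairs together, -1 to push them apart
>>> loss = F.cosine_embedding_loss(input1, input2, target)
>>> loss.backward()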
"""
if has_torch_function_variadic(input1, input2, target):
return handle_torch_function(
cosine_embedding_loss,
(input1, input2, target),
input1,
input2,
target,
margin=margin,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
return torch.cosine_embedding_loss(input1, input2, target, margin, reduction_enum)
def multi_margin_loss(
input: Tensor,
target: Tensor,
p: int = 1,
margin: float = 1.0,
weight: Optional[Tensor] = None,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""multi_margin_loss(input, target, p=1, margin=1, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.MultiMarginLoss` for details.
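Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes and targets are arbitrary)::
>>> input = torch.randn(3, 5, requires_grad=True)
>>> target = torch.tensor([1, 0, 4])  # class indices in [0, C)
>>> loss = F.multi_margin_loss(input, target)
>>> loss.backward()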
"""
if has_torch_function_variadic(input, target, weight):
return handle_torch_function(
multi_margin_loss,
(input, target, weight),
input,
target,
p=p,
margin=margin,
weight=weight,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
if p != 1 and p != 2:
raise ValueError("only p == 1 and p == 2 supported")
if weight is not None:
if weight.dim() != 1:
raise ValueError("weight must be one-dimensional")
return torch._C._nn.multi_margin_loss(input, target, p, margin, weight, reduction_enum)
pixel_shuffle = _add_docstr(
torch.pixel_shuffle,
r"""
pixel_shuffle(input, upscale_factor) -> Tensor
Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)` to a
tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is the :attr:`upscale_factor`.
See :class:`~torch.nn.PixelShuffle` for details.
Args:
input (Tensor): the input tensor
upscale_factor (int): factor to increase spatial resolution by
Examples::
>>> input = torch.randn(1, 9, 4, 4)
>>> output = torch.nn.functional.pixel_shuffle(input, 3)
>>> print(output.size())
torch.Size([1, 1, 12, 12])
""",
)
pixel_unshuffle = _add_docstr(
torch.pixel_unshuffle,
r"""
pixel_unshuffle(input, downscale_factor) -> Tensor
Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements in a
tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape
:math:`(*, C \times r^2, H, W)`, where r is the :attr:`downscale_factor`.
See :class:`~torch.nn.PixelUnshuffle` for details.
Args:
input (Tensor): the input tensor
downscale_factor (int): factor to decrease spatial resolution by
Examples::
>>> input = torch.randn(1, 1, 12, 12)
>>> output = torch.nn.functional.pixel_unshuffle(input, 3)
>>> print(output.size())
torch.Size([1, 9, 4, 4])
""",
)
channel_shuffle = _add_docstr(
torch.channel_shuffle,
r"""
channel_shuffle(input, groups) -> Tensor
Divide the channels in a tensor of shape :math:`(*, C , H, W)`
into g groups and rearrange them as :math:`(*, \frac{C}{g}, g, H, W)`,
while keeping the original tensor shape.
See :class:`~torch.nn.ChannelShuffle` for details.
Args:
input (Tensor): the input tensor
groups (int): number of groups to divide channels in and rearrange.
Examples::
>>> input = torch.arange(1, 17).view(1, 4, 2, 2)
>>> print(input)
[[[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]],
[[9, 10],
[11, 12]],
[[13, 14],
[15, 16]],
]]
>>> output = torch.nn.functional.channel_shuffle(input, 2)
>>> print(output)
[[[[1, 2],
[3, 4]],
[[9, 10],
[11, 12]],
[[5, 6],
[7, 8]],
[[13, 14],
[15, 16]],
]]
""",
)
native_channel_shuffle = _add_docstr(
torch.native_channel_shuffle,
r"""
native_channel_shuffle(input, groups) -> Tensor
Native kernel-level implementation of ``channel_shuffle``.
This function might become private in future releases; use with caution.
Divide the channels in a tensor of shape :math:`(*, C , H, W)`
into g groups and rearrange them as :math:`(*, \frac{C}{g}, g, H, W)`,
while keeping the original tensor shape.
See :class:`~torch.nn.ChannelShuffle` for details.
Args:
input (Tensor): the input tensor
groups (int): number of groups to divide channels in and rearrange.
Examples::
>>> input = torch.arange(1, 17).view(1, 4, 2, 2)
>>> print(input)
[[[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]],
[[9, 10],
[11, 12]],
[[13, 14],
[15, 16]],
]]
>>> output = torch.nn.functional.native_channel_shuffle(input, 2)
>>> print(output)
[[[[1, 2],
[3, 4]],
[[9, 10],
[11, 12]],
[[5, 6],
[7, 8]],
[[13, 14],
[15, 16]],
]]
""",
)
@_overload # noqa: F811
def upsample(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None, mode: str = "nearest", align_corners: Optional[bool] = None) -> Tensor: # noqa: F811
pass
@_overload # noqa: F811
def upsample(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None, mode: str = "nearest", align_corners: Optional[bool] = None) -> Tensor: # noqa: F811
pass
def upsample(input, size=None, scale_factor=None, mode="nearest", align_corners=None): # noqa: F811
r"""Upsamples the input to either the given :attr:`size` or the given
:attr:`scale_factor`
.. warning::
This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.
This is equivalent with ``nn.functional.interpolate(...)``.
Note:
{backward_reproducibility_note}
The algorithm used for upsampling is determined by :attr:`mode`.
Currently temporal, spatial and volumetric upsampling are supported, i.e.
expected inputs are 3-D, 4-D or 5-D in shape.
The input dimensions are interpreted in the form:
`mini-batch x channels x [optional depth] x [optional height] x width`.
The modes available for upsampling are: `nearest`, `linear` (3D-only),
`bilinear`, `bicubic` (4D-only), `trilinear` (5D-only)
Args:
input (Tensor): the input tensor
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
output spatial size.
scale_factor (float or Tuple[float]): multiplier for spatial size. If it is a tuple, its length has to match the number of spatial dimensions.
mode (str): algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'``. Default: ``'nearest'``
align_corners (bool, optional): Geometrically, we consider the pixels of the
input and output as squares rather than points.
If set to ``True``, the input and output tensors are aligned by the
center points of their corner pixels, preserving the values at the corner pixels.
If set to ``False``, the input and output tensors are aligned by the corner
points of their corner pixels, and the interpolation uses edge value padding
for out-of-boundary values, making this operation *independent* of input size
when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.
Default: ``False``
.. note::
With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce
negative values or values greater than 255 for images.
Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot
when displaying the image.
.. warning::
With ``align_corners = True``, the linearly interpolating modes
(`linear`, `bilinear`, and `trilinear`) don't proportionally align the
output and input pixels, and thus the output values can depend on the
input size. This was the default behavior for these modes up to version
0.3.1. Since then, the default behavior is ``align_corners = False``.
See :class:`~torch.nn.Upsample` for concrete examples on how this
affects the outputs.
"""
warnings.warn("nn.functional.upsample is deprecated. Use nn.functional.interpolate instead.")
return interpolate(input, size, scale_factor, mode, align_corners)
if upsample.__doc__:
upsample.__doc__ = upsample.__doc__.format(**reproducibility_notes)
@_overload # noqa: F811
def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811
pass
@_overload # noqa: F811
def interpolate(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811
pass
@_overload # noqa: F811
def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811
pass
@_overload # noqa: F811
def interpolate( # noqa: F811
input: Tensor,
size: Optional[List[int]] = None,
scale_factor: Optional[float] = None,
mode: str = "nearest",
align_corners: Optional[bool] = None,
recompute_scale_factor: Optional[bool] = None,
antialias: bool = False,
) -> Tensor: # noqa: F811
pass
def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811
r"""Down/up samples the input to either the given :attr:`size` or the given
:attr:`scale_factor`
The algorithm used for interpolation is determined by :attr:`mode`.
Currently temporal, spatial and volumetric sampling are supported, i.e.
expected inputs are 3-D, 4-D or 5-D in shape.
The input dimensions are interpreted in the form:
`mini-batch x channels x [optional depth] x [optional height] x width`.
The modes available for resizing are: `nearest`, `linear` (3D-only),
`bilinear`, `bicubic` (4D-only), `trilinear` (5D-only), `area`, `nearest-exact`
Args:
input (Tensor): the input tensor
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
output spatial size.
scale_factor (float or Tuple[float]): multiplier for spatial size. If `scale_factor` is a tuple,
its length has to match the number of spatial dimensions; `input.dim() - 2`.
mode (str): algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'`` | ``'area'`` | ``'nearest-exact'``. Default: ``'nearest'``
align_corners (bool, optional): Geometrically, we consider the pixels of the
input and output as squares rather than points.
If set to ``True``, the input and output tensors are aligned by the
center points of their corner pixels, preserving the values at the corner pixels.
If set to ``False``, the input and output tensors are aligned by the corner
points of their corner pixels, and the interpolation uses edge value padding
for out-of-boundary values, making this operation *independent* of input size
when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``.
Default: ``False``
recompute_scale_factor (bool, optional): recompute the scale_factor for use in the
interpolation calculation. If `recompute_scale_factor` is ``True``, then
`scale_factor` must be passed in and `scale_factor` is used to compute the
output `size`. The computed output `size` will be used to infer new scales for
the interpolation. Note that when `scale_factor` is floating-point, it may differ
from the recomputed `scale_factor` due to rounding and precision issues.
If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will
be used directly for interpolation. Default: ``None``.
antialias (bool, optional): flag to apply anti-aliasing. Default: ``False``. When the anti-alias
option is used together with ``align_corners=False``, the interpolation result matches the
Pillow result for downsampling. Supported modes: ``'bilinear'``, ``'bicubic'``.
.. note::
With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce
negative values or values greater than 255 for images.
Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot
when displaying the image.
.. note::
Mode ``mode='nearest-exact'`` matches the Scikit-Image and PIL nearest-neighbour interpolation
algorithms and fixes known issues with ``mode='nearest'``.
Mode ``mode='nearest'`` matches the buggy OpenCV ``INTER_NEAREST`` interpolation algorithm
and is kept for backward compatibility.
Note:
{backward_reproducibility_note}
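Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; shapes are arbitrary)::
>>> input = torch.randn(1, 3, 8, 8)  # (N, C, H, W)
>>> output = F.interpolate(input, scale_factor=2, mode='bilinear', align_corners=False)
>>> output.shape
torch.Size([1, 3, 16, 16])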
"""
if has_torch_function_unary(input):
return handle_torch_function(
interpolate,
(input,),
input,
size=size,
scale_factor=scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor,
antialias=antialias
)
if mode in ("nearest", "area", "nearest-exact"):
if align_corners is not None:
raise ValueError(
"align_corners option can only be set with the "
"interpolating modes: linear | bilinear | bicubic | trilinear"
)
else:
if align_corners is None:
align_corners = False
dim = input.dim() - 2 # Number of spatial dimensions.
# Process size and scale_factor. Validate that exactly one is set.
# Validate its length if it is a list, or expand it if it is a scalar.
# After this block, exactly one of output_size and scale_factors will
# be non-None, and it will be a list (or tuple).
if size is not None and scale_factor is not None:
raise ValueError("only one of size or scale_factor should be defined")
elif size is not None:
assert scale_factor is None
scale_factors = None
if isinstance(size, (list, tuple)):
if len(size) != dim:
raise ValueError(
"Input and output must have the same number of spatial dimensions, but got "
f"input with with spatial dimensions of {list(input.shape[2:])} and output size of {size}. "
"Please provide input tensor in (N, C, d1, d2, ...,dK) format and "
"output size in (o1, o2, ...,oK) format."
)
output_size = size
else:
output_size = [size for _ in range(dim)]
elif scale_factor is not None:
assert size is None
output_size = None
if isinstance(scale_factor, (list, tuple)):
if len(scale_factor) != dim:
raise ValueError(
"Input and scale_factor must have the same number of spatial dimensions, but "
f"got input with spatial dimensions of {list(input.shape[2:])} and "
f"scale_factor of shape {scale_factor}. "
"Please provide input tensor in (N, C, d1, d2, ...,dK) format and "
"scale_factor in (s1, s2, ...,sK) format."
)
scale_factors = scale_factor
else:
scale_factors = [scale_factor for _ in range(dim)]
else:
raise ValueError("either size or scale_factor should be defined")
if recompute_scale_factor is not None and recompute_scale_factor and size is not None:
raise ValueError("recompute_scale_factor is not meaningful with an explicit size.")
# "area" mode always requires an explicit size rather than scale factor.
# Re-use the recompute_scale_factor code path.
if mode == "area" and output_size is None:
recompute_scale_factor = True
if recompute_scale_factor is not None and recompute_scale_factor:
# We compute output_size here, then un-set scale_factors.
# The C++ code will recompute it based on the (integer) output size.
if not torch.jit.is_scripting() and torch._C._get_tracing_state():
# make scale_factor a tensor in tracing so constant doesn't get baked in
output_size = [
(torch.floor((input.size(i + 2).float() * torch.tensor(scale_factors[i], dtype=torch.float32)).float()))
for i in range(dim)
]
else:
assert scale_factors is not None
output_size = [int(math.floor(float(input.size(i + 2)) * scale_factors[i])) for i in range(dim)]
scale_factors = None
if antialias and not (mode in ("bilinear", "bicubic") and input.ndim == 4):
raise ValueError("Anti-alias option is only supported for bilinear and bicubic modes")
if input.dim() == 3 and mode == "nearest":
return torch._C._nn.upsample_nearest1d(input, output_size, scale_factors)
if input.dim() == 4 and mode == "nearest":
return torch._C._nn.upsample_nearest2d(input, output_size, scale_factors)
if input.dim() == 5 and mode == "nearest":
return torch._C._nn.upsample_nearest3d(input, output_size, scale_factors)
if input.dim() == 3 and mode == "nearest-exact":
return torch._C._nn._upsample_nearest_exact1d(input, output_size, scale_factors)
if input.dim() == 4 and mode == "nearest-exact":
return torch._C._nn._upsample_nearest_exact2d(input, output_size, scale_factors)
if input.dim() == 5 and mode == "nearest-exact":
return torch._C._nn._upsample_nearest_exact3d(input, output_size, scale_factors)
if input.dim() == 3 and mode == "area":
assert output_size is not None
return adaptive_avg_pool1d(input, output_size)
if input.dim() == 4 and mode == "area":
assert output_size is not None
return adaptive_avg_pool2d(input, output_size)
if input.dim() == 5 and mode == "area":
assert output_size is not None
return adaptive_avg_pool3d(input, output_size)
if input.dim() == 3 and mode == "linear":
assert align_corners is not None
return torch._C._nn.upsample_linear1d(input, output_size, align_corners, scale_factors)
if input.dim() == 4 and mode == "bilinear":
assert align_corners is not None
if antialias:
return torch._C._nn._upsample_bilinear2d_aa(input, output_size, align_corners, scale_factors)
return torch._C._nn.upsample_bilinear2d(input, output_size, align_corners, scale_factors)
if input.dim() == 5 and mode == "trilinear":
assert align_corners is not None
return torch._C._nn.upsample_trilinear3d(input, output_size, align_corners, scale_factors)
if input.dim() == 4 and mode == "bicubic":
assert align_corners is not None
if antialias:
return torch._C._nn._upsample_bicubic2d_aa(input, output_size, align_corners, scale_factors)
return torch._C._nn.upsample_bicubic2d(input, output_size, align_corners, scale_factors)
if input.dim() == 3 and mode == "bilinear":
raise NotImplementedError("Got 3D input, but bilinear mode needs 4D input")
if input.dim() == 3 and mode == "trilinear":
raise NotImplementedError("Got 3D input, but trilinear mode needs 5D input")
if input.dim() == 4 and mode == "linear":
raise NotImplementedError("Got 4D input, but linear mode needs 3D input")
if input.dim() == 4 and mode == "trilinear":
raise NotImplementedError("Got 4D input, but trilinear mode needs 5D input")
if input.dim() == 5 and mode == "linear":
raise NotImplementedError("Got 5D input, but linear mode needs 3D input")
if input.dim() == 5 and mode == "bilinear":
raise NotImplementedError("Got 5D input, but bilinear mode needs 4D input")
raise NotImplementedError(
"Input Error: Only 3D, 4D and 5D input Tensors supported"
" (got {}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact"
" (got {})".format(input.dim(), mode)
)
if interpolate.__doc__:
interpolate.__doc__ = interpolate.__doc__.format(**reproducibility_notes)
@_overload # noqa: F811
def upsample_nearest(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None) -> Tensor: # noqa: F811
pass
@_overload # noqa: F811
def upsample_nearest(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None) -> Tensor: # noqa: F811
pass
def upsample_nearest(input, size=None, scale_factor=None): # noqa: F811
r"""Upsamples the input, using nearest neighbours' pixel values.
.. warning::
This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.
This is equivalent with ``nn.functional.interpolate(..., mode='nearest')``.
Currently spatial and volumetric upsampling are supported (i.e. expected
inputs are 4 or 5 dimensional).
Args:
input (Tensor): input
size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
size.
scale_factor (int): multiplier for spatial size. Has to be an integer.
Note:
{backward_reproducibility_note}
"""
# DeprecationWarning is ignored by default
warnings.warn("nn.functional.upsample_nearest is deprecated. Use nn.functional.interpolate instead.")
return interpolate(input, size, scale_factor, mode="nearest")
if upsample_nearest.__doc__:
upsample_nearest.__doc__ = upsample_nearest.__doc__.format(**reproducibility_notes)
@_overload # noqa: F811
def upsample_bilinear(
input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None
) -> Tensor: # noqa: F811
pass
@_overload # noqa: F811
def upsample_bilinear( # noqa: F811
input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None
) -> Tensor: # noqa: F811
pass
@_overload # noqa: F811
def upsample_bilinear( # noqa: F811
input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None
) -> Tensor: # noqa: F811
pass
@_overload # noqa: F811
def upsample_bilinear( # noqa: F811
input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[List[float]] = None
) -> Tensor: # noqa: F811
pass
def upsample_bilinear(input, size=None, scale_factor=None): # noqa: F811
r"""Upsamples the input, using bilinear upsampling.
.. warning::
This function is deprecated in favor of :func:`torch.nn.functional.interpolate`.
This is equivalent with
``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.
Expected inputs are spatial (4 dimensional). Use `upsample_trilinear` for
volumetric (5 dimensional) inputs.
Args:
input (Tensor): input
size (int or Tuple[int, int]): output spatial size.
scale_factor (int or Tuple[int, int]): multiplier for spatial size
Note:
{backward_reproducibility_note}
"""
# DeprecationWarning is ignored by default
warnings.warn("nn.functional.upsample_bilinear is deprecated. Use nn.functional.interpolate instead.")
return interpolate(input, size, scale_factor, mode="bilinear", align_corners=True)
if upsample_bilinear.__doc__:
upsample_bilinear.__doc__ = upsample_bilinear.__doc__.format(**reproducibility_notes)
GRID_SAMPLE_INTERPOLATION_MODES = {
"bilinear": 0,
"nearest": 1,
"bicubic": 2,
}
GRID_SAMPLE_PADDING_MODES = {
"zeros": 0,
"border": 1,
"reflection": 2,
}
def grid_sample(
input: Tensor,
grid: Tensor,
mode: str = "bilinear",
padding_mode: str = "zeros",
align_corners: Optional[bool] = None,
) -> Tensor:
r"""Given an :attr:`input` and a flow-field :attr:`grid`, computes the
``output`` using :attr:`input` values and pixel locations from :attr:`grid`.
Currently, only spatial (4-D) and volumetric (5-D) :attr:`input` are
supported.
In the spatial (4-D) case, for :attr:`input` with shape
:math:`(N, C, H_\text{in}, W_\text{in})` and :attr:`grid` with shape
:math:`(N, H_\text{out}, W_\text{out}, 2)`, the output will have shape
:math:`(N, C, H_\text{out}, W_\text{out})`.
For each output location ``output[n, :, h, w]``, the size-2 vector
``grid[n, h, w]`` specifies :attr:`input` pixel locations ``x`` and ``y``,
which are used to interpolate the output value ``output[n, :, h, w]``.
In the case of 5D inputs, ``grid[n, d, h, w]`` specifies the
``x``, ``y``, ``z`` pixel locations for interpolating
``output[n, :, d, h, w]``. :attr:`mode` argument specifies ``nearest`` or
``bilinear`` interpolation method to sample the input pixels.
:attr:`grid` specifies the sampling pixel locations normalized by the
:attr:`input` spatial dimensions. Therefore, it should have most values in
the range of ``[-1, 1]``. For example, values ``x = -1, y = -1`` is the
left-top pixel of :attr:`input`, and values ``x = 1, y = 1`` is the
right-bottom pixel of :attr:`input`.
If :attr:`grid` has values outside the range of ``[-1, 1]``, the corresponding
outputs are handled as defined by :attr:`padding_mode`. Options are
* ``padding_mode="zeros"``: use ``0`` for out-of-bound grid locations,
* ``padding_mode="border"``: use border values for out-of-bound grid locations,
* ``padding_mode="reflection"``: use values at locations reflected by
the border for out-of-bound grid locations. For location far away
from the border, it will keep being reflected until becoming in bound,
e.g., (normalized) pixel location ``x = -3.5`` reflects by border ``-1``
and becomes ``x' = 1.5``, then reflects by border ``1`` and becomes
``x'' = -0.5``.
Note:
This function is often used in conjunction with :func:`affine_grid`
to build `Spatial Transformer Networks`_ .
Note:
When using the CUDA backend, this operation may induce nondeterministic
behaviour in its backward pass that is not easily switched off.
Please see the notes on :doc:`/notes/randomness` for background.
Note:
NaN values in :attr:`grid` would be interpreted as ``-1``.
Args:
input (Tensor): input of shape :math:`(N, C, H_\text{in}, W_\text{in})` (4-D case)
or :math:`(N, C, D_\text{in}, H_\text{in}, W_\text{in})` (5-D case)
grid (Tensor): flow-field of shape :math:`(N, H_\text{out}, W_\text{out}, 2)` (4-D case)
or :math:`(N, D_\text{out}, H_\text{out}, W_\text{out}, 3)` (5-D case)
mode (str): interpolation mode to calculate output values
``'bilinear'`` | ``'nearest'`` | ``'bicubic'``. Default: ``'bilinear'``
Note: ``mode='bicubic'`` supports only 4-D input.
When ``mode='bilinear'`` and the input is 5-D, the interpolation mode
used internally will actually be trilinear. However, when the input is 4-D,
the interpolation mode will legitimately be bilinear.
padding_mode (str): padding mode for outside grid values
``'zeros'`` | ``'border'`` | ``'reflection'``. Default: ``'zeros'``
align_corners (bool, optional): Geometrically, we consider the pixels of the
input as squares rather than points.
If set to ``True``, the extrema (``-1`` and ``1``) are considered as referring
to the center points of the input's corner pixels. If set to ``False``, they
are instead considered as referring to the corner points of the input's corner
pixels, making the sampling more resolution agnostic.
This option parallels the ``align_corners`` option in
:func:`interpolate`, and so whichever option is used here
should also be used there to resize the input image before grid sampling.
Default: ``False``
Returns:
output (Tensor): output Tensor
.. _`Spatial Transformer Networks`:
https://arxiv.org/abs/1506.02025
.. warning::
When ``align_corners = True``, the grid positions depend on the pixel
size relative to the input image size, and so the locations sampled by
:func:`grid_sample` will differ for the same input given at different
resolutions (that is, after being upsampled or downsampled).
The default behavior up to version 1.2.0 was ``align_corners = True``.
Since then, the default behavior has been changed to ``align_corners = False``,
in order to bring it in line with the default for :func:`interpolate`.
.. note::
``mode='bicubic'`` is implemented using the `cubic convolution algorithm`_ with :math:`\alpha=-0.75`.
The constant :math:`\alpha` might be different from packages to packages.
For example, `PIL`_ and `OpenCV`_ use -0.5 and -0.75 respectively.
This algorithm may "overshoot" the range of values it's interpolating.
For example, it may produce negative values or values greater than 255 when interpolating input in [0, 255].
Clamp the results with :func:`torch.clamp` to ensure they are within the valid range.
.. _`cubic convolution algorithm`: https://en.wikipedia.org/wiki/Bicubic_interpolation
.. _`PIL`: https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/src/libImaging/Resample.c#L51
.. _`OpenCV`: https://github.com/opencv/opencv/blob/f345ed564a06178670750bad59526cfa4033be55/modules/imgproc/src/resize.cpp#L908
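Example (a minimal illustrative sketch, assuming ``import torch.nn.functional as F``; an identity warp built with :func:`affine_grid`)::
>>> input = torch.randn(1, 3, 8, 8)
>>> theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])  # identity affine matrix
>>> grid = F.affine_grid(theta, (1, 3, 8, 8), align_corners=False)
>>> output = F.grid_sample(input, grid, align_corners=False)
>>> output.shape
torch.Size([1, 3, 8, 8])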
"""
if has_torch_function_variadic(input, grid):
return handle_torch_function(
grid_sample, (input, grid), input, grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners
)
if mode != "bilinear" and mode != "nearest" and mode != "bicubic":
raise ValueError(
"nn.functional.grid_sample(): expected mode to be "
"'bilinear', 'nearest' or 'bicubic', but got: '{}'".format(mode)
)
if padding_mode != "zeros" and padding_mode != "border" and padding_mode != "reflection":
raise ValueError(
"nn.functional.grid_sample(): expected padding_mode "
"to be 'zeros', 'border', or 'reflection', "
"but got: '{}'".format(padding_mode)
)
if mode == "bilinear":
mode_enum = 0
elif mode == "nearest":
mode_enum = 1
else: # mode == 'bicubic'
mode_enum = 2
if padding_mode == "zeros":
padding_mode_enum = 0
elif padding_mode == "border":
padding_mode_enum = 1
else: # padding_mode == 'reflection'
padding_mode_enum = 2
if align_corners is None:
warnings.warn(
"Default grid_sample and affine_grid behavior has changed "
"to align_corners=False since 1.3.0. Please specify "
"align_corners=True if the old behavior is desired. "
"See the documentation of grid_sample for details."
)
align_corners = False
return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners)
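# A minimal illustrative sketch: `_example_grid_sample_corners` is a hypothetical
# helper (not part of this module's API) that only demonstrates the coordinate
# convention described in the docstring above, i.e. that with ``align_corners=True``
# the grid values (-1, -1) and (1, 1) address the centers of the top-left and
# bottom-right input pixels.
def _example_grid_sample_corners():
    inp = torch.arange(4.0).reshape(1, 1, 2, 2)            # [[0., 1.], [2., 3.]]
    # grid has shape (N, H_out, W_out, 2); the last dimension holds (x, y) pairs
    grid = torch.tensor([[[[-1.0, -1.0], [1.0, 1.0]]]])
    out = grid_sample(inp, grid, mode="nearest", align_corners=True)
    assert out.flatten().tolist() == [0.0, 3.0]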
def affine_grid(theta: Tensor, size: List[int], align_corners: Optional[bool] = None) -> Tensor:
r"""Generates a 2D or 3D flow field (sampling grid), given a batch of
affine matrices :attr:`theta`.
.. note::
This function is often used in conjunction with :func:`grid_sample`
to build `Spatial Transformer Networks`_ .
Args:
theta (Tensor): input batch of affine matrices with shape
(:math:`N \times 2 \times 3`) for 2D or
(:math:`N \times 3 \times 4`) for 3D
size (torch.Size): the target output image size.
(:math:`N \times C \times H \times W` for 2D or
:math:`N \times C \times D \times H \times W` for 3D)
Example: torch.Size((32, 3, 24, 24))
align_corners (bool, optional): if ``True``, consider ``-1`` and ``1``
to refer to the centers of the corner pixels rather than the image corners.
Refer to :func:`grid_sample` for a more complete description.
A grid generated by :func:`affine_grid` should be passed to :func:`grid_sample`
with the same setting for this option.
Default: ``False``
Returns:
output (Tensor): output Tensor of size (:math:`N \times H \times W \times 2`)
.. _`Spatial Transformer Networks`:
https://arxiv.org/abs/1506.02025
.. warning::
When ``align_corners = True``, the grid positions depend on the pixel
size relative to the input image size, and so the locations sampled by
:func:`grid_sample` will differ for the same input given at different
resolutions (that is, after being upsampled or downsampled).
The default behavior up to version 1.2.0 was ``align_corners = True``.
Since then, the default behavior has been changed to ``align_corners = False``,
in order to bring it in line with the default for :func:`interpolate`.
.. warning::
When ``align_corners = True``, 2D affine transforms on 1D data and
3D affine transforms on 2D data (that is, when one of the spatial
dimensions has unit size) are ill-defined, and not an intended use case.
This is not a problem when ``align_corners = False``.
Up to version 1.2.0, all grid points along a unit dimension were
considered arbitrarily to be at ``-1``.
From version 1.3.0, under ``align_corners = True`` all grid points
along a unit dimension are considered to be at ``0``
(the center of the input image).
"""
if has_torch_function_unary(theta):
return handle_torch_function(affine_grid, (theta,), theta, size, align_corners=align_corners)
if align_corners is None:
warnings.warn(
"Default grid_sample and affine_grid behavior has changed "
"to align_corners=False since 1.3.0. Please specify "
"align_corners=True if the old behavior is desired. "
"See the documentation of grid_sample for details."
)
align_corners = False
# enforce floating point dtype on theta
if not theta.is_floating_point():
raise ValueError("Expected theta to have floating point type, but got {}".format(theta.dtype))
# check that shapes and sizes match
if len(size) == 4:
if theta.dim() != 3 or theta.shape[-2] != 2 or theta.shape[-1] != 3:
raise ValueError(
"Expected a batch of 2D affine matrices of shape Nx2x3 "
"for size {}. Got {}.".format(size, theta.shape)
)
spatial_size = size[-2:] # spatial dimension sizes
elif len(size) == 5:
if theta.dim() != 3 or theta.shape[-2] != 3 or theta.shape[-1] != 4:
raise ValueError(
"Expected a batch of 3D affine matrices of shape Nx3x4 "
"for size {}. Got {}.".format(size, theta.shape)
)
spatial_size = size[-3:] # spatial dimension sizes
else:
raise NotImplementedError(
"affine_grid only supports 4D and 5D sizes, "
"for 2D and 3D affine transforms, respectively. "
"Got size {}.".format(size)
)
# check for empty span
if align_corners and min(spatial_size) == 1:
warnings.warn(
"Since version 1.3.0, affine_grid behavior has changed "
"for unit-size grids when align_corners=True. "
"This is not an intended use case of affine_grid. "
"See the documentation of affine_grid for details."
)
elif min(size) <= 0:
raise ValueError("Expected non-zero, positive output size. Got {}".format(size))
return torch.affine_grid_generator(theta, size, align_corners)
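# A minimal illustrative sketch: `_example_affine_grid_identity` is a hypothetical
# helper (not part of this module's API) showing that an identity affine matrix,
# passed through affine_grid and grid_sample with the same align_corners setting,
# reproduces the input up to floating-point error.
def _example_affine_grid_identity():
    x = torch.randn(1, 1, 4, 4)
    theta = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])  # (N, 2, 3) identity
    grid = affine_grid(theta, list(x.shape), align_corners=False)
    y = grid_sample(x, grid, align_corners=False)
    assert torch.allclose(x, y, atol=1e-5)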
pad = _add_docstr(
torch._C._nn.pad,
r"""
pad(input, pad, mode="constant", value=None) -> Tensor
Pads tensor.
Padding size:
    The padding size by which to pad some dimensions of :attr:`input`
    is described starting from the last dimension and moving forward.
:math:`\left\lfloor\frac{\text{len(pad)}}{2}\right\rfloor` dimensions
of ``input`` will be padded.
    For example, to pad only the last dimension of the input tensor,
:attr:`pad` has the form
:math:`(\text{padding\_left}, \text{padding\_right})`;
to pad the last 2 dimensions of the input tensor, then use
:math:`(\text{padding\_left}, \text{padding\_right},`
:math:`\text{padding\_top}, \text{padding\_bottom})`;
to pad the last 3 dimensions, use
:math:`(\text{padding\_left}, \text{padding\_right},`
    :math:`\text{padding\_top}, \text{padding\_bottom},`
:math:`\text{padding\_front}, \text{padding\_back})`.
Padding mode:
See :class:`torch.nn.ConstantPad2d`, :class:`torch.nn.ReflectionPad2d`, and
:class:`torch.nn.ReplicationPad2d` for concrete examples on how each of the
padding modes works. Constant padding is implemented for arbitrary dimensions.
Replicate and reflection padding are implemented for padding the last 3
dimensions of a 4D or 5D input tensor, the last 2 dimensions of a 3D
or 4D input tensor, or the last dimension of a 2D or 3D input tensor.
Note:
When using the CUDA backend, this operation may induce nondeterministic
behaviour in its backward pass that is not easily switched off.
Please see the notes on :doc:`/notes/randomness` for background.
Args:
input (Tensor): N-dimensional tensor
    pad (tuple): m-element tuple, where
:math:`\frac{m}{2} \leq` input dimensions and :math:`m` is even.
mode: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``.
Default: ``'constant'``
value: fill value for ``'constant'`` padding. Default: ``0``
Examples::
>>> t4d = torch.empty(3, 3, 4, 2)
>>> p1d = (1, 1) # pad last dim by 1 on each side
>>> out = F.pad(t4d, p1d, "constant", 0) # effectively zero padding
>>> print(out.size())
torch.Size([3, 3, 4, 4])
>>> p2d = (1, 1, 2, 2) # pad last dim by (1, 1) and 2nd to last by (2, 2)
>>> out = F.pad(t4d, p2d, "constant", 0)
>>> print(out.size())
torch.Size([3, 3, 8, 4])
>>> t4d = torch.empty(3, 3, 4, 2)
>>> p3d = (0, 1, 2, 1, 3, 3) # pad by (0, 1), (2, 1), and (3, 3)
>>> out = F.pad(t4d, p3d, "constant", 0)
>>> print(out.size())
torch.Size([3, 9, 7, 3])
""")
# TODO: Fix via https://github.com/pytorch/pytorch/issues/75798
pad.__module__ = "torch.nn.functional"
# distance
pairwise_distance = _add_docstr(
torch.pairwise_distance,
r"""
pairwise_distance(x1, x2, p=2.0, eps=1e-6, keepdim=False) -> Tensor
See :class:`torch.nn.PairwiseDistance` for details
""")
pdist = _add_docstr(
torch.pdist,
r"""
pdist(input, p=2) -> Tensor
Computes the p-norm distance between every pair of row vectors in the input.
This is identical to the upper triangular portion, excluding the diagonal, of
`torch.norm(input[:, None] - input, dim=2, p=p)`. This function will be faster
if the rows are contiguous.
If input has shape :math:`N \times M` then the output will have shape
:math:`\frac{1}{2} N (N - 1)`.
This function is equivalent to ``scipy.spatial.distance.pdist(input,
'minkowski', p=p)`` if :math:`p \in (0, \infty)`. When :math:`p = 0` it is
equivalent to ``scipy.spatial.distance.pdist(input, 'hamming') * M``.
When :math:`p = \infty`, the closest scipy function is
``scipy.spatial.distance.pdist(xn, lambda x, y: np.abs(x - y).max())``.
Args:
input: input tensor of shape :math:`N \times M`.
p: p value for the p-norm distance to calculate between each vector pair
:math:`\in [0, \infty]`.
""",
)
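# A small illustrative check: `_example_pdist_upper_triangle` is a hypothetical
# helper (not part of this module) confirming that ``torch.pdist`` matches the
# upper-triangular, off-diagonal entries of the naive pairwise-norm construction
# described in the docstring above.
def _example_pdist_upper_triangle():
    x = torch.randn(4, 3)
    full = torch.norm(x[:, None] - x, dim=2, p=2)          # (4, 4) distance matrix
    i, j = torch.triu_indices(4, 4, offset=1)              # strict upper triangle, row-major
    assert torch.allclose(torch.pdist(x, p=2), full[i, j], atol=1e-5)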
cosine_similarity = _add_docstr(
torch.cosine_similarity,
r"""
cosine_similarity(x1, x2, dim=1, eps=1e-8) -> Tensor
Returns cosine similarity between ``x1`` and ``x2``, computed along dim. ``x1`` and ``x2`` must be broadcastable
to a common shape. ``dim`` refers to the dimension in this common shape. Dimension ``dim`` of the output is
squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 fewer dimension.
.. math ::
\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}
Supports :ref:`type promotion <type-promotion-doc>`.
Args:
x1 (Tensor): First input.
x2 (Tensor): Second input.
dim (int, optional): Dimension along which cosine similarity is computed. Default: 1
eps (float, optional): Small value to avoid division by zero.
Default: 1e-8
Example::
>>> input1 = torch.randn(100, 128)
>>> input2 = torch.randn(100, 128)
>>> output = F.cosine_similarity(input1, input2)
>>> print(output)
""",
)
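# A small illustrative check: `_example_cosine_similarity_formula` is a hypothetical
# helper (not part of this module) computing the formula above by hand and
# comparing it against ``torch.cosine_similarity``.
def _example_cosine_similarity_formula():
    x1, x2 = torch.randn(5, 8), torch.randn(5, 8)
    eps = 1e-8
    manual = (x1 * x2).sum(dim=1) / (x1.norm(dim=1) * x2.norm(dim=1)).clamp_min(eps)
    assert torch.allclose(torch.cosine_similarity(x1, x2, dim=1), manual, atol=1e-5)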
one_hot = _add_docstr(
torch._C._nn.one_hot,
r"""
one_hot(tensor, num_classes=-1) -> LongTensor
Takes a LongTensor with index values of shape ``(*)`` and returns a tensor
of shape ``(*, num_classes)`` that has zeros everywhere except where the
index of the last dimension matches the corresponding value of the input tensor,
in which case it will be 1.
See also `One-hot on Wikipedia`_ .
.. _One-hot on Wikipedia:
https://en.wikipedia.org/wiki/One-hot
Arguments:
tensor (LongTensor): class values of any shape.
num_classes (int): Total number of classes. If set to -1, the number
of classes will be inferred as one greater than the largest class
value in the input tensor.
Returns:
    LongTensor that has one more dimension, with 1 at the index of the
    last dimension indicated by the input and 0 everywhere else.
Examples:
>>> F.one_hot(torch.arange(0, 5) % 3)
tensor([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]])
>>> F.one_hot(torch.arange(0, 5) % 3, num_classes=5)
tensor([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0]])
>>> F.one_hot(torch.arange(0, 6).view(3,2) % 3)
tensor([[[1, 0, 0],
[0, 1, 0]],
[[0, 0, 1],
[1, 0, 0]],
[[0, 1, 0],
[0, 0, 1]]])
""",
)
def triplet_margin_loss(
anchor: Tensor,
positive: Tensor,
negative: Tensor,
margin: float = 1.0,
p: float = 2,
eps: float = 1e-6,
swap: bool = False,
size_average: Optional[bool] = None,
reduce: Optional[bool] = None,
reduction: str = "mean",
) -> Tensor:
r"""
See :class:`~torch.nn.TripletMarginLoss` for details
"""
if has_torch_function_variadic(anchor, positive, negative):
return handle_torch_function(
triplet_margin_loss,
(anchor, positive, negative),
anchor,
positive,
negative,
margin=margin,
p=p,
eps=eps,
swap=swap,
size_average=size_average,
reduce=reduce,
reduction=reduction,
)
if size_average is not None or reduce is not None:
reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
reduction_enum = _Reduction.get_enum(reduction)
return torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction_enum)
def triplet_margin_with_distance_loss(
anchor: Tensor,
positive: Tensor,
negative: Tensor,
*,
distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
margin: float = 1.0,
swap: bool = False,
reduction: str = "mean"
) -> Tensor:
r"""
See :class:`~torch.nn.TripletMarginWithDistanceLoss` for details.
"""
if torch.jit.is_scripting():
raise NotImplementedError(
"F.triplet_margin_with_distance_loss does not support JIT scripting: "
"functions requiring Callables cannot be scripted."
)
if has_torch_function_variadic(anchor, positive, negative):
return handle_torch_function(
triplet_margin_with_distance_loss,
(anchor, positive, negative),
anchor,
positive,
negative,
distance_function=distance_function,
margin=margin,
swap=swap,
reduction=reduction,
)
distance_function = distance_function if distance_function is not None else pairwise_distance
positive_dist = distance_function(anchor, positive)
negative_dist = distance_function(anchor, negative)
if swap:
swap_dist = distance_function(positive, negative)
negative_dist = torch.min(negative_dist, swap_dist)
output = torch.clamp(positive_dist - negative_dist + margin, min=0.0)
reduction_enum = _Reduction.get_enum(reduction)
if reduction_enum == 1:
return output.mean()
elif reduction_enum == 2:
return output.sum()
else:
return output
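# A minimal illustrative sketch: `_example_triplet_margin_with_distance_loss` is a
# hypothetical helper (not part of this module) reproducing the default behaviour
# above, i.e. mean(clamp(d(a, p) - d(a, n) + margin, min=0)) with
# ``pairwise_distance`` as the distance function.
def _example_triplet_margin_with_distance_loss():
    anchor, positive, negative = (torch.randn(4, 8) for _ in range(3))
    loss = triplet_margin_with_distance_loss(anchor, positive, negative, margin=1.0)
    expected = torch.clamp(
        pairwise_distance(anchor, positive) - pairwise_distance(anchor, negative) + 1.0,
        min=0.0,
    ).mean()
    assert torch.allclose(loss, expected, atol=1e-5)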
def normalize(input: Tensor, p: float = 2.0, dim: int = 1, eps: float = 1e-12, out: Optional[Tensor] = None) -> Tensor:
r"""Performs :math:`L_p` normalization of inputs over specified dimension.
For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
:math:`n_{dim}` -element vector :math:`v` along dimension :attr:`dim` is transformed as
.. math::
v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.
With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization.
Args:
input: input tensor of any shape
p (float): the exponent value in the norm formulation. Default: 2
dim (int): the dimension to reduce. Default: 1
eps (float): small value to avoid division by zero. Default: 1e-12
out (Tensor, optional): the output tensor. If :attr:`out` is used, this
operation won't be differentiable.
"""
if has_torch_function_variadic(input, out):
return handle_torch_function(normalize, (input, out), input, p=p, dim=dim, eps=eps, out=out)
if out is None:
denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)
return input / denom
else:
denom = input.norm(p, dim, keepdim=True).clamp_min_(eps).expand_as(input)
return torch.div(input, denom, out=out)
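# A small illustrative check: `_example_normalize_unit_norm` is a hypothetical
# helper (not part of this module) verifying that after L2 normalization every
# row of the result has (approximately) unit norm, as described above.
def _example_normalize_unit_norm():
    x = torch.randn(3, 5)
    y = normalize(x, p=2.0, dim=1)
    assert torch.allclose(y.norm(p=2, dim=1), torch.ones(3), atol=1e-5)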
def assert_int_or_pair(arg: List[int], arg_name: str, message: str) -> None:
assert isinstance(arg, int) or len(arg) == 2, message.format(arg_name)
def unfold(
input: Tensor, kernel_size: BroadcastingList2[int],
dilation: BroadcastingList2[int] = 1,
padding: BroadcastingList2[int] = 0,
stride: BroadcastingList2[int] = 1
) -> Tensor:
r"""Extracts sliding local blocks from a batched input tensor.
.. warning::
Currently, only 4-D input tensors (batched image-like tensors) are
supported.
.. warning::
More than one element of the unfolded tensor may refer to a single
memory location. As a result, in-place operations (especially ones that
are vectorized) may result in incorrect behavior. If you need to write
to the tensor, please clone it first.
See :class:`torch.nn.Unfold` for details
"""
if has_torch_function_unary(input):
return handle_torch_function(
unfold, (input,), input, kernel_size, dilation=dilation, padding=padding, stride=stride
)
if input.dim() == 4:
msg = "{} must be int or 2-tuple for 4D input"
assert_int_or_pair(kernel_size, "kernel_size", msg)
assert_int_or_pair(dilation, "dilation", msg)
assert_int_or_pair(padding, "padding", msg)
assert_int_or_pair(stride, "stride", msg)
return torch._C._nn.im2col(input, _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))
else:
raise NotImplementedError("Input Error: Only 4D input Tensors are supported (got {}D)".format(input.dim()))
def fold(
input: Tensor, output_size: BroadcastingList2[int],
kernel_size: BroadcastingList2[int],
dilation: BroadcastingList2[int] = 1,
padding: BroadcastingList2[int] = 0,
stride: BroadcastingList2[int] = 1
) -> Tensor:
r"""Combines an array of sliding local blocks into a large containing
tensor.
.. warning::
Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported.
See :class:`torch.nn.Fold` for details
"""
if has_torch_function_unary(input):
return handle_torch_function(
fold, (input,), input, output_size, kernel_size, dilation=dilation, padding=padding, stride=stride
)
if input.dim() == 3 or input.dim() == 2:
msg = "{} must be int or 2-tuple for 3D input"
assert_int_or_pair(output_size, "output_size", msg)
assert_int_or_pair(kernel_size, "kernel_size", msg)
assert_int_or_pair(dilation, "dilation", msg)
assert_int_or_pair(padding, "padding", msg)
assert_int_or_pair(stride, "stride", msg)
return torch._C._nn.col2im(
input, _pair(output_size), _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride)
)
else:
raise NotImplementedError("Input Error: Only unbatched (2D) or batched (3D) input Tensors"
f"are supported (got {input.dim()}D)")
#
# multihead attention
#
def _in_projection_packed(
q: Tensor,
k: Tensor,
v: Tensor,
w: Tensor,
b: Optional[Tensor] = None,
) -> List[Tensor]:
r"""
Performs the in-projection step of the attention operation, using packed weights.
Output is a triple containing projection tensors for query, key and value.
Args:
q, k, v: query, key and value tensors to be projected. For self-attention,
these are typically the same tensor; for encoder-decoder attention,
k and v are typically the same tensor. (We take advantage of these
identities for performance if they are present.) Regardless, q, k and v
must share a common embedding dimension; otherwise their shapes may vary.
w: projection weights for q, k and v, packed into a single tensor. Weights
are packed along dimension 0, in q, k, v order.
b: optional projection biases for q, k and v, packed into a single tensor
in q, k, v order.
Shape:
Inputs:
- q: :math:`(..., E)` where E is the embedding dimension
- k: :math:`(..., E)` where E is the embedding dimension
- v: :math:`(..., E)` where E is the embedding dimension
- w: :math:`(E * 3, E)` where E is the embedding dimension
- b: :math:`E * 3` where E is the embedding dimension
Output:
- in output list :math:`[q', k', v']`, each output tensor will have the
same shape as the corresponding input tensor.
"""
E = q.size(-1)
if k is v:
if q is k:
# self-attention
return linear(q, w, b).chunk(3, dim=-1)
else:
# encoder-decoder attention
w_q, w_kv = w.split([E, E * 2])
if b is None:
b_q = b_kv = None
else:
b_q, b_kv = b.split([E, E * 2])
return (linear(q, w_q, b_q),) + linear(k, w_kv, b_kv).chunk(2, dim=-1)
else:
w_q, w_k, w_v = w.chunk(3)
if b is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = b.chunk(3)
return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
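# A minimal illustrative sketch: `_example_in_projection_packed_split` is a
# hypothetical helper (not part of this module) checking that, on the
# self-attention path, chunking the packed projection output is equivalent to
# three separate linear projections with the unpacked weights.
def _example_in_projection_packed_split():
    E = 4
    q = torch.randn(2, 3, E)
    w, b = torch.randn(3 * E, E), torch.randn(3 * E)
    q_p, k_p, v_p = _in_projection_packed(q, q, q, w, b)   # q is k is v -> self-attention
    (w_q, w_k, w_v), (b_q, b_k, b_v) = w.chunk(3), b.chunk(3)
    assert torch.allclose(q_p, linear(q, w_q, b_q), atol=1e-5)
    assert torch.allclose(k_p, linear(q, w_k, b_k), atol=1e-5)
    assert torch.allclose(v_p, linear(q, w_v, b_v), atol=1e-5)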
def _in_projection(
q: Tensor,
k: Tensor,
v: Tensor,
w_q: Tensor,
w_k: Tensor,
w_v: Tensor,
b_q: Optional[Tensor] = None,
b_k: Optional[Tensor] = None,
b_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
r"""
Performs the in-projection step of the attention operation. This is simply
a triple of linear projections, with shape constraints on the weights which
ensure embedding dimension uniformity in the projected outputs.
Output is a triple containing projection tensors for query, key and value.
Args:
q, k, v: query, key and value tensors to be projected.
w_q, w_k, w_v: weights for q, k and v, respectively.
b_q, b_k, b_v: optional biases for q, k and v, respectively.
Shape:
Inputs:
- q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
number of leading dimensions.
- k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
number of leading dimensions.
- v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
number of leading dimensions.
- w_q: :math:`(Eq, Eq)`
- w_k: :math:`(Eq, Ek)`
- w_v: :math:`(Eq, Ev)`
- b_q: :math:`(Eq)`
- b_k: :math:`(Eq)`
- b_v: :math:`(Eq)`
Output: in output triple :math:`(q', k', v')`,
- q': :math:`[Qdims..., Eq]`
- k': :math:`[Kdims..., Eq]`
- v': :math:`[Vdims..., Eq]`
"""
Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
def _scaled_dot_product_attention(
q: Tensor,
k: Tensor,
v: Tensor,
attn_mask: Optional[Tensor] = None,
dropout_p: float = 0.0,
) -> Tuple[Tensor, Tensor]:
r"""
Computes scaled dot product attention on query, key and value tensors, using
an optional attention mask if passed, and applying dropout if a probability
greater than 0.0 is specified.
Returns a tensor pair containing attended values and attention weights.
Args:
q, k, v: query, key and value tensors. See Shape section for shape details.
attn_mask: optional tensor containing mask values to be added to calculated
attention. May be 2D or 3D; see Shape section for details.
dropout_p: dropout probability. If greater than 0.0, dropout is applied.
Shape:
- q: :math:`(B, Nt, E)` where B is batch size, Nt is the target sequence length,
and E is embedding dimension.
- key: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,
and E is embedding dimension.
- value: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,
and E is embedding dimension.
- attn_mask: either a 3D tensor of shape :math:`(B, Nt, Ns)` or a 2D tensor of
shape :math:`(Nt, Ns)`.
- Output: attention values have shape :math:`(B, Nt, E)`; attention weights
have shape :math:`(B, Nt, Ns)`
"""
B, Nt, E = q.shape
q = q / math.sqrt(E)
# (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)
if attn_mask is not None:
attn = torch.baddbmm(attn_mask, q, k.transpose(-2, -1))
else:
attn = torch.bmm(q, k.transpose(-2, -1))
attn = softmax(attn, dim=-1)
if dropout_p > 0.0:
attn = dropout(attn, p=dropout_p)
# (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
output = torch.bmm(attn, v)
return output, attn
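# A minimal illustrative sketch: `_example_scaled_dot_product_attention` is a
# hypothetical helper (not part of this module) spelling out the computation
# above as softmax(q @ k^T / sqrt(E)) @ v and checking the returned shapes.
def _example_scaled_dot_product_attention():
    B, Nt, Ns, E = 2, 3, 4, 8
    q, k, v = torch.randn(B, Nt, E), torch.randn(B, Ns, E), torch.randn(B, Ns, E)
    out, attn = _scaled_dot_product_attention(q, k, v)
    manual = softmax(q @ k.transpose(-2, -1) / math.sqrt(E), dim=-1)
    assert attn.shape == (B, Nt, Ns) and out.shape == (B, Nt, E)
    assert torch.allclose(attn, manual, atol=1e-5)
    assert torch.allclose(out, manual @ v, atol=1e-5)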
def _mha_shape_check(query: Tensor, key: Tensor, value: Tensor,
key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int):
    # Verifies the expected shape for `query`, `key`, `value`, `key_padding_mask` and `attn_mask`
# and returns if the input is batched or not.
# Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor.
# Shape check.
if query.dim() == 3:
# Batched Inputs
is_batched = True
assert key.dim() == 3 and value.dim() == 3, \
("For batched (3-D) `query`, expected `key` and `value` to be 3-D"
f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
if key_padding_mask is not None:
assert key_padding_mask.dim() == 2, \
("For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
f" but found {key_padding_mask.dim()}-D tensor instead")
if attn_mask is not None:
assert attn_mask.dim() in (2, 3), \
("For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
f" but found {attn_mask.dim()}-D tensor instead")
elif query.dim() == 2:
# Unbatched Inputs
is_batched = False
assert key.dim() == 2 and value.dim() == 2, \
("For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
if key_padding_mask is not None:
assert key_padding_mask.dim() == 1, \
("For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
f" but found {key_padding_mask.dim()}-D tensor instead")
if attn_mask is not None:
assert attn_mask.dim() in (2, 3), \
("For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
f" but found {attn_mask.dim()}-D tensor instead")
if attn_mask.dim() == 3:
expected_shape = (num_heads, query.shape[0], key.shape[0])
assert attn_mask.shape == expected_shape, \
(f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}")
else:
raise AssertionError(
f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor")
return is_batched
def multi_head_attention_forward(
query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Optional[Tensor],
in_proj_bias: Optional[Tensor],
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Optional[Tensor],
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
average_attn_weights: bool = True,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
            the batches while a 3D mask allows a different mask to be specified for the entries of each batch.
        use_separate_proj_weight: the function accepts the projection weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
            a combination of q_proj_weight, k_proj_weight, and v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads.
Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect
            when ``need_weights=True``. Default: True
Shape:
Inputs:
- query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(S)` or :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
          S is the source sequence length. attn_mask ensures that position i is allowed to attend to the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: Only returned when ``need_weights=True``. If ``average_attn_weights=True``, returns
attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
:math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.
"""
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
if has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward,
tens_ops,
query,
key,
value,
embed_dim_to_check,
num_heads,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
add_zero_attn,
dropout_p,
out_proj_weight,
out_proj_bias,
training=training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight,
k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight,
static_k=static_k,
static_v=static_v,
average_attn_weights=average_attn_weights,
)
is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)
    # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
    # is batched, run the computation, and squeeze the batch dimension before returning so
    # that the output doesn't carry this temporary batch dimension.
if not is_batched:
# unsqueeze if the input is unbatched
query = query.unsqueeze(1)
key = key.unsqueeze(1)
value = value.unsqueeze(1)
if key_padding_mask is not None:
key_padding_mask = key_padding_mask.unsqueeze(0)
# set up shape vars
tgt_len, bsz, embed_dim = query.shape
src_len, _, _ = key.shape
assert embed_dim == embed_dim_to_check, \
f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
if isinstance(embed_dim, torch.Tensor):
# embed_dim can be a tensor when JIT tracing
head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
if use_separate_proj_weight:
# allow MHA to have different embedding dimensions when separate projection weights are used
assert key.shape[:2] == value.shape[:2], \
f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
else:
assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
#
# compute in-projection
#
if not use_separate_proj_weight:
assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
else:
assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
if in_proj_bias is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = in_proj_bias.chunk(3)
q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
# prep attention mask
if attn_mask is not None:
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
else:
assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \
f"Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}"
# ensure attn_mask's dim is 3
if attn_mask.dim() == 2:
correct_2d_size = (tgt_len, src_len)
if attn_mask.shape != correct_2d_size:
raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
attn_mask = attn_mask.unsqueeze(0)
elif attn_mask.dim() == 3:
correct_3d_size = (bsz * num_heads, tgt_len, src_len)
if attn_mask.shape != correct_3d_size:
raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
else:
raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
# prep key padding mask
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
# add bias along batch dimension (currently second)
if bias_k is not None and bias_v is not None:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert bias_k is None
assert bias_v is None
#
# reshape q, k, v for multihead attention and make em batch first
#
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is None:
k = k.contiguous().view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
else:
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
assert static_k.size(0) == bsz * num_heads, \
f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
assert static_k.size(2) == head_dim, \
f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
k = static_k
if static_v is None:
v = v.contiguous().view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
else:
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
assert static_v.size(0) == bsz * num_heads, \
f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
assert static_v.size(2) == head_dim, \
f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
v = static_v
# add zero attention along batch dimension (now first)
if add_zero_attn:
zero_attn_shape = (bsz * num_heads, 1, head_dim)
k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
# update source sequence length after adjustments
src_len = k.size(1)
# merge key padding and attention masks
if key_padding_mask is not None:
assert key_padding_mask.shape == (bsz, src_len), \
f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
if attn_mask is None:
attn_mask = key_padding_mask
elif attn_mask.dtype == torch.bool:
attn_mask = attn_mask.logical_or(key_padding_mask)
else:
attn_mask = attn_mask.masked_fill(key_padding_mask, float("-inf"))
# convert mask to float
if attn_mask is not None and attn_mask.dtype == torch.bool:
new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
new_attn_mask.masked_fill_(attn_mask, float("-inf"))
attn_mask = new_attn_mask
# adjust dropout probability
if not training:
dropout_p = 0.0
#
# (deep breath) calculate attention and out projection
#
attn_output, attn_output_weights = _scaled_dot_product_attention(q, k, v, attn_mask, dropout_p)
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
if need_weights:
# optionally average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
if average_attn_weights:
attn_output_weights = attn_output_weights.sum(dim=1) / num_heads
if not is_batched:
# squeeze the output if input was unbatched
attn_output = attn_output.squeeze(1)
attn_output_weights = attn_output_weights.squeeze(0)
return attn_output, attn_output_weights
else:
if not is_batched:
# squeeze the output if input was unbatched
attn_output = attn_output.squeeze(1)
return attn_output, None
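# A minimal illustrative sketch: `_example_multi_head_attention_forward_shapes`
# is a hypothetical helper (not part of this module); the toy sizes below are
# assumptions used only to show the expected input and output shapes for the
# batched, packed-weight case described in the docstring above.
def _example_multi_head_attention_forward_shapes():
    L, S, N, E, H = 4, 5, 2, 8, 2
    query, key, value = torch.randn(L, N, E), torch.randn(S, N, E), torch.randn(S, N, E)
    in_proj_weight, in_proj_bias = torch.randn(3 * E, E), torch.zeros(3 * E)
    out_proj_weight, out_proj_bias = torch.randn(E, E), torch.zeros(E)
    attn_output, attn_weights = multi_head_attention_forward(
        query, key, value, E, H,
        in_proj_weight, in_proj_bias, None, None, False, 0.0,
        out_proj_weight, out_proj_bias, training=False)
    assert attn_output.shape == (L, N, E)
    assert attn_weights.shape == (N, L, S)   # averaged over heads by default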
|
pytorch-master
|
torch/nn/functional.py
|
import math
import warnings
from torch import Tensor
import torch
# These no_grad_* functions are necessary as wrappers around the parts of these
# functions that use `with torch.no_grad()`. The JIT doesn't support context
# managers, so these need to be implemented as builtins. Using these wrappers
# lets us keep those builtins small and re-usable.
def _no_grad_uniform_(tensor, a, b):
with torch.no_grad():
return tensor.uniform_(a, b)
def _no_grad_normal_(tensor, mean, std):
with torch.no_grad():
return tensor.normal_(mean, std)
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
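# A small illustrative check: `_example_trunc_normal_bounds` is a hypothetical
# helper (not part of this module) confirming that the inverse-CDF construction
# above keeps every sample inside the truncation interval [a, b].
def _example_trunc_normal_bounds():
    t = torch.empty(1000)
    _no_grad_trunc_normal_(t, mean=0., std=1., a=-2., b=2.)
    assert float(t.min()) >= -2. and float(t.max()) <= 2.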
def _no_grad_fill_(tensor, val):
with torch.no_grad():
return tensor.fill_(val)
def _no_grad_zero_(tensor):
with torch.no_grad():
return tensor.zero_()
def calculate_gain(nonlinearity, param=None):
r"""Return the recommended gain value for the given nonlinearity function.
The values are as follows:
================= ====================================================
nonlinearity gain
================= ====================================================
Linear / Identity :math:`1`
Conv{1,2,3}D :math:`1`
Sigmoid :math:`1`
Tanh :math:`\frac{5}{3}`
ReLU :math:`\sqrt{2}`
Leaky Relu :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
SELU :math:`\frac{3}{4}`
================= ====================================================
.. warning::
In order to implement `Self-Normalizing Neural Networks`_ ,
you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``.
This gives the initial weights a variance of ``1 / N``,
which is necessary to induce a stable fixed point in the forward pass.
In contrast, the default gain for ``SELU`` sacrifices the normalisation
effect for more stable gradient flow in rectangular layers.
Args:
nonlinearity: the non-linear function (`nn.functional` name)
param: optional parameter for the non-linear function
Examples:
>>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2
.. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html
"""
linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
return 1
elif nonlinearity == 'tanh':
return 5.0 / 3
elif nonlinearity == 'relu':
return math.sqrt(2.0)
elif nonlinearity == 'leaky_relu':
if param is None:
negative_slope = 0.01
    elif (not isinstance(param, bool) and isinstance(param, int)) or isinstance(param, float):
        # True/False are instances of int, hence the explicit bool check above
negative_slope = param
else:
raise ValueError("negative_slope {} not a valid number".format(param))
return math.sqrt(2.0 / (1 + negative_slope ** 2))
elif nonlinearity == 'selu':
return 3.0 / 4 # Value found empirically (https://github.com/pytorch/pytorch/pull/50664)
else:
raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
def uniform_(tensor: Tensor, a: float = 0., b: float = 1.) -> Tensor:
r"""Fills the input Tensor with values drawn from the uniform
distribution :math:`\mathcal{U}(a, b)`.
Args:
tensor: an n-dimensional `torch.Tensor`
a: the lower bound of the uniform distribution
b: the upper bound of the uniform distribution
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.uniform_(w)
"""
if torch.overrides.has_torch_function_variadic(tensor):
return torch.overrides.handle_torch_function(uniform_, (tensor,), tensor=tensor, a=a, b=b)
return _no_grad_uniform_(tensor, a, b)
def normal_(tensor: Tensor, mean: float = 0., std: float = 1.) -> Tensor:
r"""Fills the input Tensor with values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.normal_(w)
"""
if torch.overrides.has_torch_function_variadic(tensor):
return torch.overrides.handle_torch_function(normal_, (tensor,), tensor=tensor, mean=mean, std=std)
return _no_grad_normal_(tensor, mean, std)
def trunc_normal_(tensor: Tensor, mean: float = 0., std: float = 1., a: float = -2., b: float = 2.) -> Tensor:
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def constant_(tensor: Tensor, val: float) -> Tensor:
r"""Fills the input Tensor with the value :math:`\text{val}`.
Args:
tensor: an n-dimensional `torch.Tensor`
val: the value to fill the tensor with
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.constant_(w, 0.3)
"""
if torch.overrides.has_torch_function_variadic(tensor):
return torch.overrides.handle_torch_function(constant_, (tensor,), tensor=tensor, val=val)
return _no_grad_fill_(tensor, val)
def ones_(tensor: Tensor) -> Tensor:
r"""Fills the input Tensor with the scalar value `1`.
Args:
tensor: an n-dimensional `torch.Tensor`
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.ones_(w)
"""
return _no_grad_fill_(tensor, 1.)
def zeros_(tensor: Tensor) -> Tensor:
r"""Fills the input Tensor with the scalar value `0`.
Args:
tensor: an n-dimensional `torch.Tensor`
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.zeros_(w)
"""
return _no_grad_zero_(tensor)
def eye_(tensor):
r"""Fills the 2-dimensional input `Tensor` with the identity
matrix. Preserves the identity of the inputs in `Linear` layers, where as
many inputs are preserved as possible.
Args:
tensor: a 2-dimensional `torch.Tensor`
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.eye_(w)
"""
if tensor.ndimension() != 2:
raise ValueError("Only tensors with 2 dimensions are supported")
with torch.no_grad():
torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad)
return tensor
def dirac_(tensor, groups=1):
r"""Fills the {3, 4, 5}-dimensional input `Tensor` with the Dirac
delta function. Preserves the identity of the inputs in `Convolutional`
layers, where as many input channels are preserved as possible. In case
of groups>1, each group of channels preserves identity
Args:
tensor: a {3, 4, 5}-dimensional `torch.Tensor`
groups (int, optional): number of groups in the conv layer (default: 1)
Examples:
>>> w = torch.empty(3, 16, 5, 5)
>>> nn.init.dirac_(w)
>>> w = torch.empty(3, 24, 5, 5)
>>> nn.init.dirac_(w, 3)
"""
dimensions = tensor.ndimension()
if dimensions not in [3, 4, 5]:
raise ValueError("Only tensors with 3, 4, or 5 dimensions are supported")
sizes = tensor.size()
if sizes[0] % groups != 0:
raise ValueError('dim 0 must be divisible by groups')
out_chans_per_grp = sizes[0] // groups
min_dim = min(out_chans_per_grp, sizes[1])
with torch.no_grad():
tensor.zero_()
for g in range(groups):
for d in range(min_dim):
if dimensions == 3: # Temporal convolution
tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2] = 1
elif dimensions == 4: # Spatial convolution
tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2,
tensor.size(3) // 2] = 1
else: # Volumetric convolution
tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2,
tensor.size(3) // 2, tensor.size(4) // 2] = 1
return tensor
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.dim()
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
# math.prod is not always available, accumulate the product manually
# we could use functools.reduce but that is not supported by TorchScript
for s in tensor.shape[2:]:
receptive_field_size *= s
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def xavier_uniform_(tensor: Tensor, gain: float = 1.) -> Tensor:
r"""Fills the input `Tensor` with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks` - Glorot, X. & Bengio, Y. (2010), using a uniform
distribution. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-a, a)` where
.. math::
a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}
Also known as Glorot initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
gain: an optional scaling factor
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
"""
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return _no_grad_uniform_(tensor, -a, a)
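# A small illustrative check: `_example_xavier_uniform_bound` is a hypothetical
# helper (not part of this module) verifying that every sampled value stays
# within the bound a = gain * sqrt(6 / (fan_in + fan_out)) derived above.
def _example_xavier_uniform_bound():
    w = torch.empty(16, 32)
    xavier_uniform_(w)                                      # gain defaults to 1
    fan_in, fan_out = _calculate_fan_in_and_fan_out(w)      # 32, 16
    assert float(w.abs().max()) <= math.sqrt(6.0 / (fan_in + fan_out))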
def xavier_normal_(tensor: Tensor, gain: float = 1.) -> Tensor:
r"""Fills the input `Tensor` with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks` - Glorot, X. & Bengio, Y. (2010), using a normal
distribution. The resulting tensor will have values sampled from
:math:`\mathcal{N}(0, \text{std}^2)` where
.. math::
\text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}
Also known as Glorot initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
gain: an optional scaling factor
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.xavier_normal_(w)
"""
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
return _no_grad_normal_(tensor, 0., std)
def _calculate_correct_fan(tensor, mode):
mode = mode.lower()
valid_modes = ['fan_in', 'fan_out']
if mode not in valid_modes:
raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
return fan_in if mode == 'fan_in' else fan_out
def kaiming_uniform_(
tensor: Tensor, a: float = 0, mode: str = 'fan_in', nonlinearity: str = 'leaky_relu'
):
r"""Fills the input `Tensor` with values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification` - He, K. et al. (2015), using a
uniform distribution. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-\text{bound}, \text{bound})` where
.. math::
\text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
Also known as He initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')
"""
if torch.overrides.has_torch_function_variadic(tensor):
return torch.overrides.handle_torch_function(
kaiming_uniform_,
(tensor,),
tensor=tensor,
a=a,
mode=mode,
nonlinearity=nonlinearity)
if 0 in tensor.shape:
warnings.warn("Initializing zero-element tensors is a no-op")
return tensor
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain(nonlinearity, a)
std = gain / math.sqrt(fan)
bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
with torch.no_grad():
return tensor.uniform_(-bound, bound)
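# A small illustrative check: `_example_kaiming_uniform_bound` is a hypothetical
# helper (not part of this module) verifying the bound gain * sqrt(3 / fan_mode)
# for nonlinearity 'relu' with fan_in = 32.
def _example_kaiming_uniform_bound():
    w = torch.empty(16, 32)
    kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')
    bound = math.sqrt(2.0) * math.sqrt(3.0 / 32)            # gain(relu) * sqrt(3 / fan_in)
    assert float(w.abs().max()) <= bound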
def kaiming_normal_(
tensor: Tensor, a: float = 0, mode: str = 'fan_in', nonlinearity: str = 'leaky_relu'
):
r"""Fills the input `Tensor` with values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification` - He, K. et al. (2015), using a
normal distribution. The resulting tensor will have values sampled from
:math:`\mathcal{N}(0, \text{std}^2)` where
.. math::
\text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}}
Also known as He initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
"""
if 0 in tensor.shape:
warnings.warn("Initializing zero-element tensors is a no-op")
return tensor
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain(nonlinearity, a)
std = gain / math.sqrt(fan)
with torch.no_grad():
return tensor.normal_(0, std)
def orthogonal_(tensor, gain=1):
r"""Fills the input `Tensor` with a (semi) orthogonal matrix, as
described in `Exact solutions to the nonlinear dynamics of learning in deep
linear neural networks` - Saxe, A. et al. (2013). The input tensor must have
at least 2 dimensions, and for tensors with more than 2 dimensions the
trailing dimensions are flattened.
Args:
tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
gain: optional scaling factor
Examples:
>>> # xdoctest: +REQUIRES(--lapack)
>>> w = torch.empty(3, 5)
>>> nn.init.orthogonal_(w)
"""
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported")
if tensor.numel() == 0:
# no-op
return tensor
rows = tensor.size(0)
cols = tensor.numel() // rows
flattened = tensor.new(rows, cols).normal_(0, 1)
if rows < cols:
flattened.t_()
# Compute the qr factorization
q, r = torch.linalg.qr(flattened)
# Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
d = torch.diag(r, 0)
ph = d.sign()
q *= ph
if rows < cols:
q.t_()
with torch.no_grad():
tensor.view_as(q).copy_(q)
tensor.mul_(gain)
return tensor
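# A small illustrative check: `_example_orthogonal_columns` is a hypothetical
# helper (not part of this module) confirming that for a tall matrix the
# resulting columns are orthonormal.
def _example_orthogonal_columns():
    w = torch.empty(5, 3)
    orthogonal_(w)
    assert torch.allclose(w.t() @ w, torch.eye(3), atol=1e-5)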
def sparse_(tensor, sparsity, std=0.01):
r"""Fills the 2D input `Tensor` as a sparse matrix, where the
non-zero elements will be drawn from the normal distribution
:math:`\mathcal{N}(0, 0.01)`, as described in `Deep learning via
Hessian-free optimization` - Martens, J. (2010).
Args:
tensor: an n-dimensional `torch.Tensor`
sparsity: The fraction of elements in each column to be set to zero
std: the standard deviation of the normal distribution used to generate
the non-zero values
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.sparse_(w, sparsity=0.1)
"""
if tensor.ndimension() != 2:
raise ValueError("Only tensors with 2 dimensions are supported")
rows, cols = tensor.shape
num_zeros = int(math.ceil(sparsity * rows))
with torch.no_grad():
tensor.normal_(0, std)
for col_idx in range(cols):
row_indices = torch.randperm(rows)
zero_indices = row_indices[:num_zeros]
tensor[zero_indices, col_idx] = 0
return tensor
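# A small illustrative check: `_example_sparse_fraction` is a hypothetical
# helper (not part of this module) confirming that at least
# ceil(sparsity * rows) entries in each column are zeroed.
def _example_sparse_fraction():
    w = torch.empty(10, 4)
    sparse_(w, sparsity=0.3)
    zeros_per_col = (w == 0).sum(dim=0)
    assert bool((zeros_per_col >= int(math.ceil(0.3 * 10))).all())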
# for backward compatibility
def _make_deprecate(meth):
new_name = meth.__name__
old_name = new_name[:-1]
def deprecated_init(*args, **kwargs):
warnings.warn("nn.init.{} is now deprecated in favor of nn.init.{}."
.format(old_name, new_name), stacklevel=2)
return meth(*args, **kwargs)
deprecated_init.__doc__ = r"""
{old_name}(...)
.. warning::
This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.
See :func:`~torch.nn.init.{new_name}` for details.""".format(
old_name=old_name, new_name=new_name)
deprecated_init.__name__ = old_name
return deprecated_init
uniform = _make_deprecate(uniform_)
normal = _make_deprecate(normal_)
constant = _make_deprecate(constant_)
eye = _make_deprecate(eye_)
dirac = _make_deprecate(dirac_)
xavier_uniform = _make_deprecate(xavier_uniform_)
xavier_normal = _make_deprecate(xavier_normal_)
kaiming_uniform = _make_deprecate(kaiming_uniform_)
kaiming_normal = _make_deprecate(kaiming_normal_)
orthogonal = _make_deprecate(orthogonal_)
sparse = _make_deprecate(sparse_)
|
pytorch-master
|
torch/nn/init.py
|
"""Gradient interface"""
import torch
from .modules.utils import _single, _pair, _triple
def conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv1d with respect to the input of the convolution.
    This is the same as the 1D transposed convolution operator under the hood but requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weight tensor (out_channels x in_channels/groups x kW)
grad_output : output gradient tensor (minibatch x out_channels x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3, requires_grad=True)
>>> weight = torch.randn(1,1,1, requires_grad=True)
>>> output = F.conv1d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv1d_input(input.shape, weight, grad_output)
"""
input = grad_output.new_empty(1).expand(input_size)
return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
_single(stride), _single(padding), _single(dilation),
False, [0], groups, (True, False, False))[0]
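# A minimal illustrative sketch: `_example_conv1d_input_matches_autograd` is a
# hypothetical helper (not part of this module) checking that the explicit
# gradient computed above agrees with the gradient produced by autograd.
def _example_conv1d_input_matches_autograd():
    import torch.nn.functional as F
    inp = torch.randn(1, 1, 5, requires_grad=True)
    weight = torch.randn(1, 1, 3)
    out = F.conv1d(inp, weight)
    grad_output = torch.randn_like(out)
    (grad_inp,) = torch.autograd.grad(out, inp, grad_output)
    assert torch.allclose(grad_inp, conv1d_input(inp.shape, weight, grad_output), atol=1e-5)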
def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv1d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3, requires_grad=True)
>>> weight = torch.randn(1,1,1, requires_grad=True)
>>> output = F.conv1d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> # xdoctest: +SKIP
        >>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv1d_weight(input, weight.shape, grad_output)
"""
weight = grad_output.new_empty(1).expand(weight_size)
return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
_single(stride), _single(padding), _single(dilation),
False, [0], groups, (False, True, False))[1]
def conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv2d with respect to the input of the convolution.
This is the same as the 2D transposed convolution operator under the hood, but it requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weight tensor (out_channels x in_channels/groups x kH x kW)
grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3,3, requires_grad=True)
>>> weight = torch.randn(1,1,1,2, requires_grad=True)
>>> output = F.conv2d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv2d_input(input.shape, weight, grad_output)
"""
input = grad_output.new_empty(1).expand(input_size)
return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
_pair(stride), _pair(padding), _pair(dilation),
False, [0], groups, (True, False, False))[0]
def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv2d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iH x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(1,1,3,3, requires_grad=True)
>>> weight = torch.randn(1,1,1,2, requires_grad=True)
>>> output = F.conv2d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> # xdoctest: +SKIP
>>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv2d_weight(input, weight.shape, grad_output)
"""
weight = grad_output.new_empty(1).expand(weight_size)
return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
_pair(stride), _pair(padding), _pair(dilation),
False, [0], groups, (False, True, False))[1]
def conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv3d with respect to the input of the convolution.
This is the same as the 3D transposed convolution operator under the hood, but it requires
the shape of the gradient w.r.t. input to be specified explicitly.
Args:
input_size : Shape of the input gradient tensor
weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW)
grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
>>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
>>> output = F.conv3d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_input = torch.autograd.grad(output, input, grad_output)
>>> F.grad.conv3d_input(input.shape, weight, grad_output)
"""
input = grad_output.new_empty(1).expand(input_size)
return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
_triple(stride), _triple(padding), _triple(dilation),
False, [0], groups, (True, False, False))[0]
def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1):
r"""
Computes the gradient of conv3d with respect to the weight of the convolution.
Args:
input: input tensor of shape (minibatch x in_channels x iT x iH x iW)
weight_size : Shape of the weight gradient tensor
grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW)
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
Examples::
>>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True)
>>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True)
>>> output = F.conv3d(input, weight)
>>> grad_output = torch.randn(output.shape)
>>> grad_weight = torch.autograd.grad(output, weight, grad_output)
>>> F.grad.conv3d_weight(input, weight.shape, grad_output)
"""
weight = grad_output.new_empty(1).expand(weight_size)
return torch.ops.aten.convolution_backward(grad_output, input, weight, None,
_triple(stride), _triple(padding), _triple(dilation),
False, [0], groups, (False, True, False))[1]
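# Illustrative sketch, not part of the original module: cross-check conv2d_input and
# conv2d_weight against torch.autograd.grad on a tiny example. The shapes, stride
# and padding below are arbitrary choices.
def _demo_conv2d_grad_helpers():
    import torch
    import torch.nn.functional as F
    inp = torch.randn(1, 1, 5, 5, requires_grad=True)
    w = torch.randn(2, 1, 3, 3, requires_grad=True)
    out = F.conv2d(inp, w, stride=1, padding=1)
    grad_out = torch.randn_like(out)
    gi_ref, gw_ref = torch.autograd.grad(out, (inp, w), grad_out)
    gi = conv2d_input(inp.shape, w, grad_out, stride=1, padding=1)
    gw = conv2d_weight(inp, w.shape, grad_out, stride=1, padding=1)
    assert torch.allclose(gi, gi_ref, atol=1e-4)
    assert torch.allclose(gw, gw_ref, atol=1e-4)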
|
pytorch-master
|
torch/nn/grad.py
|
import torch
from torch._C import _disabled_torch_function_impl
from collections import OrderedDict
# Metaclass to combine _TensorMeta and the instance check override for Parameter.
class _ParameterMeta(torch._C._TensorMeta):
# Make `isinstance(t, Parameter)` return True for custom tensor instances that have the _is_param flag.
def __instancecheck__(self, instance):
return super().__instancecheck__(instance) or (
isinstance(instance, torch.Tensor) and getattr(instance, '_is_param', False))
class Parameter(torch.Tensor, metaclass=_ParameterMeta):
r"""A kind of Tensor that is to be considered a module parameter.
Parameters are :class:`~torch.Tensor` subclasses, that have a
very special property when used with :class:`Module` s - when they're
assigned as Module attributes they are automatically added to the list of
its parameters, and will appear e.g. in :meth:`~Module.parameters` iterator.
Assigning a Tensor doesn't have such an effect. This is because one might
want to cache some temporary state, like the last hidden state of the RNN, in
the model. If there were no such class as :class:`Parameter`, these
temporaries would get registered too.
Args:
data (Tensor): parameter tensor.
requires_grad (bool, optional): if the parameter requires gradient. See
:ref:`locally-disable-grad-doc` for more details. Default: `True`
"""
def __new__(cls, data=None, requires_grad=True):
if data is None:
data = torch.empty(0)
if type(data) is torch.Tensor or type(data) is Parameter:
# For ease of BC maintenance, keep this path for standard Tensor.
# Eventually (tm), we should change the behavior for standard Tensor to match.
return torch.Tensor._make_subclass(cls, data, requires_grad)
# Path for custom tensors: set a flag on the instance to indicate parameter-ness.
t = data.detach().requires_grad_(requires_grad)
if type(t) is not type(data):
raise RuntimeError(f"Creating a Parameter from an instance of type {type(data).__name__} "
"requires that detach() returns an instance of the same type, but return "
f"type {type(t).__name__} was found instead. To use the type as a "
"Parameter, please correct the detach() semantics defined by "
"its __torch_dispatch__() implementation.")
t._is_param = True
return t
# Note: the 3 methods below only apply to standard Tensor. Parameters of custom tensor types
# are still considered that custom tensor type and these methods will not be called for them.
def __deepcopy__(self, memo):
if id(self) in memo:
return memo[id(self)]
else:
result = type(self)(self.data.clone(memory_format=torch.preserve_format), self.requires_grad)
memo[id(self)] = result
return result
def __repr__(self):
return 'Parameter containing:\n' + super(Parameter, self).__repr__()
def __reduce_ex__(self, proto):
# See Note [Don't serialize hooks]
return (
torch._utils._rebuild_parameter,
(self.data, self.requires_grad, OrderedDict())
)
__torch_function__ = _disabled_torch_function_impl
class UninitializedTensorMixin:
_allowed_methods = [
torch.Tensor.__hash__,
torch.Tensor.size,
torch.Tensor.copy_,
torch.Tensor.is_floating_point,
torch.Tensor.half,
torch.Tensor.float,
torch.Tensor.double,
torch.Tensor.char,
torch.Tensor.short,
torch.Tensor.int,
torch.Tensor.long,
torch.Tensor.cuda,
torch.Tensor.cpu,
torch.Tensor.to,
torch.Tensor.get_device,
torch._has_compatible_shallow_copy_type,
]
def materialize(self, shape, device=None, dtype=None):
r"""Create a Parameter or Tensor with the same properties of the uninitialized one.
Given a shape, it materializes a parameter in the same device
and with the same `dtype` as the current one or the specified ones in the
arguments.
Args:
shape (tuple): the shape for the materialized tensor.
device (:class:`torch.device`): the desired device of the parameters
and buffers in this module. Optional.
dtype (:class:`torch.dtype`): the desired floating point type of
the floating point parameters and buffers in this module. Optional.
"""
if device is None:
device = self.data.device
if dtype is None:
dtype = self.data.dtype
self.data = torch.empty(shape, device=device, dtype=dtype)
self.__class__ = self.cls_to_become
@property
def shape(self):
raise RuntimeError(
'Can\'t access the shape of an uninitialized parameter or buffer. '
'This error usually happens in `load_state_dict` when trying to load '
'an uninitialized parameter into an initialized one. '
'Call `forward` to initialize the parameters before accessing their attributes.')
def share_memory_(self):
raise RuntimeError(
'Can\'t share memory on an uninitialized parameter or buffer. '
'Call `forward` to initialize the parameters before calling '
'`module.share_memory()`.')
def __repr__(self):
return f'<{self.__class__.__name__}>'
def __reduce_ex__(self, proto):
# See Note [Don't serialize hooks]
return (
self.__class__,
(self.requires_grad,)
)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
# The method-wrapper check is here to detect access to Tensor properties that are
# wrapped in descriptors
if func in cls._allowed_methods or func.__class__.__name__ == 'method-wrapper':
if kwargs is None:
kwargs = {}
return super().__torch_function__(func, types, args, kwargs)
raise ValueError(
'Attempted to use an uninitialized parameter in {}. '
'This error happens when you are using a `LazyModule` or '
'explicitly manipulating `torch.nn.parameter.{}` '
'objects. When using LazyModules, call `forward` with a dummy batch '
'to initialize the parameters before calling torch functions.'.format(func, cls.__name__))
def is_lazy(param):
return isinstance(param, UninitializedTensorMixin)
class UninitializedParameter(UninitializedTensorMixin, Parameter):
r"""A parameter that is not initialized.
Uninitialized Parameters are a special case of :class:`torch.nn.Parameter`
where the shape of the data is still unknown.
Unlike a :class:`torch.nn.Parameter`, uninitialized parameters
hold no data and attempting to access some properties, like their shape,
will throw a runtime error. The only operations that can be performed on an uninitialized
parameter are changing its datatype, moving it to a different device and
converting it to a regular :class:`torch.nn.Parameter`.
The default device or dtype to use when the parameter is materialized can be set
during construction using e.g. ``device='cuda'``.
"""
cls_to_become = Parameter
def __new__(cls, requires_grad=True, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
data = torch.empty(0, **factory_kwargs)
return torch.Tensor._make_subclass(cls, data, requires_grad)
class UninitializedBuffer(UninitializedTensorMixin, torch.Tensor):
r"""A buffer that is not initialized.
An uninitialized Buffer is a special case of :class:`torch.Tensor`
where the shape of the data is still unknown.
Unlike a :class:`torch.Tensor`, uninitialized buffers
hold no data and attempting to access some properties, like their shape,
will throw a runtime error. The only operations that can be performed on an uninitialized
buffer are changing its datatype, moving it to a different device and
converting it to a regular :class:`torch.Tensor`.
The default device or dtype to use when the buffer is materialized can be set
during construction using e.g. ``device='cuda'``.
"""
cls_to_become = torch.Tensor
def __new__(cls, requires_grad=False, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
data = torch.empty(0, **factory_kwargs)
return torch.Tensor._make_subclass(cls, data, requires_grad)
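# Illustrative sketch, not part of the original module: materializing an
# UninitializedParameter turns it into a regular Parameter with real storage.
# The shape and dtype below are arbitrary.
def _demo_uninitialized_parameter():
    import torch
    p = UninitializedParameter()
    assert is_lazy(p)
    p.materialize((4, 3), dtype=torch.float32)
    assert isinstance(p, Parameter) and not is_lazy(p)
    assert p.shape == (4, 3) and p.requires_grad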
|
pytorch-master
|
torch/nn/parameter.py
|
from contextlib import contextmanager
_DDP_WITH_REPLICATED_TENSOR = False
@contextmanager
def _ddp_replicated_tensor(val):
"""
A context manager to tag tensors in the forward pass of DDP to be
``ReplicatedTensor``. This can be used by ReplicatedTensor inter-op
during the forward pass to perform appropriate optimizations.
This context manager needs to wrap DDP creation. Modifying the underlying
module passed into DDP after leaving this context manager would cause
inconsistencies, and the changes will not be picked up during the forward
pass.
"""
global _DDP_WITH_REPLICATED_TENSOR
old_val = _DDP_WITH_REPLICATED_TENSOR
_DDP_WITH_REPLICATED_TENSOR = val
try:
yield
finally:
_DDP_WITH_REPLICATED_TENSOR = old_val
def _ddp_with_replicated_tensor_enabled():
global _DDP_WITH_REPLICATED_TENSOR
return _DDP_WITH_REPLICATED_TENSOR
def _set_ddp_with_replicated_tensor(value):
global _DDP_WITH_REPLICATED_TENSOR
_DDP_WITH_REPLICATED_TENSOR = value
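# Illustrative sketch, not part of the original module: the flag is only raised for
# the duration of the context (assuming the module-level default of False), which is
# how DDP construction is expected to be wrapped.
def _demo_replicated_tensor_flag():
    assert not _ddp_with_replicated_tensor_enabled()
    with _ddp_replicated_tensor(True):
        # DDP would normally be constructed here so that its forward pass can
        # tag tensors as ReplicatedTensor.
        assert _ddp_with_replicated_tensor_enabled()
    assert not _ddp_with_replicated_tensor_enabled()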
|
pytorch-master
|
torch/nn/parallel/_replicated_tensor_ddp_utils.py
|
import threading
import torch
from torch.cuda._utils import _get_device_index
from torch.cuda.amp import autocast
from torch._utils import ExceptionWrapper
def get_a_var(obj):
if isinstance(obj, torch.Tensor):
return obj
if isinstance(obj, list) or isinstance(obj, tuple):
for result in map(get_a_var, obj):
if isinstance(result, torch.Tensor):
return result
if isinstance(obj, dict):
for result in map(get_a_var, obj.items()):
if isinstance(result, torch.Tensor):
return result
return None
def parallel_apply(modules, inputs, kwargs_tup=None, devices=None):
r"""Applies each `module` in :attr:`modules` in parallel on arguments
contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
on each of :attr:`devices`.
Args:
modules (Module): modules to be parallelized
inputs (tensor): inputs to the modules
devices (list of int or torch.device): CUDA devices
:attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
:attr:`devices` (if given) should all have the same length. Moreover, each
element of :attr:`inputs` can either be a single object as the only argument
to a module, or a collection of positional arguments.
"""
assert len(modules) == len(inputs)
if kwargs_tup is not None:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
devices = [_get_device_index(x, True) for x in devices]
streams = [torch.cuda.current_stream(x) for x in devices]
lock = threading.Lock()
results = {}
grad_enabled, autocast_enabled = torch.is_grad_enabled(), torch.is_autocast_enabled()
def _worker(i, module, input, kwargs, device=None, stream=None):
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
if stream is None:
stream = torch.cuda.current_stream(device)
try:
with torch.cuda.device(device), torch.cuda.stream(stream), autocast(enabled=autocast_enabled):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
output = module(*input, **kwargs)
with lock:
results[i] = output
except Exception:
with lock:
results[i] = ExceptionWrapper(
where="in replica {} on device {}".format(i, device))
if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, kwargs, device, stream))
for i, (module, input, kwargs, device, stream) in
enumerate(zip(modules, inputs, kwargs_tup, devices, streams))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0], streams[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, ExceptionWrapper):
output.reraise()
outputs.append(output)
return outputs
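# Illustrative sketch, not part of the original module: running two replicas of a
# small module on two CUDA devices. The module and shapes are arbitrary; the sketch
# bails out when fewer than two GPUs are visible.
def _demo_parallel_apply():
    import torch
    if torch.cuda.device_count() < 2:
        return
    modules = [torch.nn.Linear(4, 2).cuda(0), torch.nn.Linear(4, 2).cuda(1)]
    inputs = [torch.randn(8, 4, device="cuda:0"), torch.randn(8, 4, device="cuda:1")]
    outputs = parallel_apply(modules, inputs, devices=[0, 1])
    assert outputs[0].device.index == 0 and outputs[1].device.index == 1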
|
pytorch-master
|
torch/nn/parallel/parallel_apply.py
|
import warnings
import torch
from torch.cuda import nccl
from torch._utils import _take_tensors, _flatten_dense_tensors, \
_unflatten_dense_tensors, _reorder_tensors_as, _get_device_index, _handle_complex
from typing import List
def broadcast(tensor, devices=None, *, out=None):
r"""Broadcasts a tensor to specified GPU devices.
Args:
tensor (Tensor): tensor to broadcast. Can be on CPU or GPU.
devices (Iterable[torch.device, str or int], optional): an iterable of
GPU devices, among which to broadcast.
out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
store output results.
.. note::
Exactly one of :attr:`devices` and :attr:`out` must be specified.
Returns:
- If :attr:`devices` is specified,
a tuple containing copies of :attr:`tensor`, placed on
:attr:`devices`.
- If :attr:`out` is specified,
a tuple containing :attr:`out` tensors, each containing a copy of
:attr:`tensor`.
"""
tensor = _handle_complex(tensor)
if not ((devices is None) ^ (out is None)):
raise RuntimeError(
"Exactly one of 'devices' and 'out' must be specified, but got "
"devices={} and out={}".format(devices, out))
if devices is not None:
devices = [_get_device_index(d) for d in devices]
return torch._C._broadcast(tensor, devices)
else:
return torch._C._broadcast_out(tensor, out)
def broadcast_coalesced(tensors, devices, buffer_size=10485760):
"""Broadcasts a sequence tensors to the specified GPUs.
Small tensors are first coalesced into a buffer to reduce the number
of synchronizations.
Args:
tensors (sequence): tensors to broadcast. Must be on the same device,
either CPU or GPU.
devices (Iterable[torch.device, str or int]): an iterable of GPU
devices, among which to broadcast.
buffer_size (int): maximum size of the buffer used for coalescing
Returns:
A tuple containing copies of :attr:`tensor`, placed on :attr:`devices`.
"""
devices = [_get_device_index(d) for d in devices]
tensors = [_handle_complex(t) for t in tensors]
return torch._C._broadcast_coalesced(tensors, devices, buffer_size)
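# Illustrative sketch, not part of the original module: broadcasting a CPU tensor to
# every visible GPU; each returned copy lives on its own device and holds the same
# values. The sketch bails out when CUDA is unavailable.
def _demo_broadcast():
    if not torch.cuda.is_available():
        return
    devices = list(range(torch.cuda.device_count()))
    src = torch.arange(6, dtype=torch.float32).reshape(2, 3)
    copies = broadcast(src, devices)
    assert all(c.device.index == d for c, d in zip(copies, devices))
    assert all(torch.equal(c.cpu(), src) for c in copies)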
def reduce_add(inputs, destination=None):
"""Sums tensors from multiple GPUs.
All inputs should have matching shapes, dtype, and layout. The output tensor
will be of the same shape, dtype, and layout.
Args:
inputs (Iterable[Tensor]): an iterable of tensors to add.
destination (int, optional): a device on which the output will be
placed (default: current device).
Returns:
A tensor containing an elementwise sum of all inputs, placed on the
:attr:`destination` device.
"""
destination = _get_device_index(destination, optional=True)
input_size = inputs[0].size()
root_index = None # index of input tensor that already is on the correct device
for i, inp in enumerate(inputs):
assert inp.device.type != "cpu", "reduce_add expects all inputs to be on GPUs"
if inp.get_device() == destination:
root_index = i
if inp.size() != input_size:
got = 'x'.join(str(x) for x in inp.size())
expected = 'x'.join(str(x) for x in input_size)
raise ValueError("input {} has invalid size: got {}, but expected "
"{}".format(i, got, expected))
if root_index is None:
raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors")
if len(inputs) == 1:
return inputs[0]
if nccl.is_available(inputs):
result = torch.empty_like(inputs[root_index])
nccl.reduce(inputs, output=result, root=root_index)
else:
destination_device = torch.device(inputs[root_index].device.type, destination)
nonroot = [t for i, t in enumerate(inputs) if i != root_index]
# make a new tensor w/o clone
result = inputs[root_index] + nonroot[0].to(device=destination_device, non_blocking=True)
for other in nonroot[1:]:
result.add_(other.to(device=destination_device, non_blocking=True))
return result
def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760):
"""Sums tensors from multiple GPUs.
Small tensors are first coalesced into a buffer to reduce the number
of synchronizations.
Args:
inputs (Iterable[Iterable[Tensor]]): iterable of iterables that
contain tensors from a single device.
destination (int, optional): a device on which the output will be
placed (default: current device).
buffer_size (int): maximum size of the buffer used for coalescing
Returns:
A tuple of tensors containing an elementwise sum of each group of
inputs, placed on the ``destination`` device.
"""
# TODO: When `len(inputs) == 1` and all inputs are on `destination`, just
# return `inputs`.
dense_tensors: List[List] = [[] for _ in inputs] # shape (num_gpus, num_tensors)
output = []
ref_order = []
# process sparse ones first since they may have different sizes on different gpus
for tensor_at_gpus in zip(*inputs):
if all(t.is_sparse for t in tensor_at_gpus):
result = reduce_add(tensor_at_gpus, destination) # this will be sparse too
output.append(result)
ref_order.append(tensor_at_gpus[0])
else:
for coll, t in zip(dense_tensors, tensor_at_gpus):
coll.append(t.to_dense() if t.is_sparse else t)
ref_order.append(dense_tensors[0][-1])
itrs = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors]
# now the dense ones, which have consistent sizes
for chunks in zip(*itrs):
flat_tensors = [_flatten_dense_tensors(chunk) for chunk in chunks] # (num_gpus,)
flat_result = reduce_add(flat_tensors, destination)
for t in _unflatten_dense_tensors(flat_result, chunks[0]):
# The unflattened tensors do not share storage, and we don't expose
# base flat tensor anyways, so give them different version counters.
# See NOTE [ Version Counter in comm.*_coalesced ]
output.append(t.data)
return tuple(_reorder_tensors_as(output, ref_order))
def scatter(tensor, devices=None, chunk_sizes=None, dim=0, streams=None, *, out=None):
"""Scatters tensor across multiple GPUs.
Args:
tensor (Tensor): tensor to scatter. Can be on CPU or GPU.
devices (Iterable[torch.device, str or int], optional): an iterable of
GPU devices, among which to scatter.
chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on
each device. It should match :attr:`devices` in length and sum to
``tensor.size(dim)``. If not specified, :attr:`tensor` will be divided
into equal chunks.
dim (int, optional): A dimension along which to chunk :attr:`tensor`.
Default: ``0``.
streams (Iterable[Stream], optional): an iterable of Streams, among
which to execute the scatter. If not specified, the default stream will
be utilized.
out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
store output results. Sizes of these tensors must match that of
:attr:`tensor`, except for :attr:`dim`, where the total size must
sum to ``tensor.size(dim)``.
.. note::
Exactly one of :attr:`devices` and :attr:`out` must be specified. When
:attr:`out` is specified, :attr:`chunk_sizes` must not be specified and
will be inferred from sizes of :attr:`out`.
Returns:
- If :attr:`devices` is specified,
a tuple containing chunks of :attr:`tensor`, placed on
:attr:`devices`.
- If :attr:`out` is specified,
a tuple containing :attr:`out` tensors, each containing a chunk of
:attr:`tensor`.
"""
tensor = _handle_complex(tensor)
if out is None:
devices = [_get_device_index(d) for d in devices]
return tuple(torch._C._scatter(tensor, devices, chunk_sizes, dim, streams))
else:
if devices is not None:
raise RuntimeError(
"'devices' must not be specified when 'out' is specified, but "
"got devices={}".format(devices))
if chunk_sizes is not None:
raise RuntimeError(
"'chunk_sizes' must not be specified when 'out' is specified, "
"but got chunk_sizes={}".format(chunk_sizes))
return tuple(torch._C._scatter_out(tensor, out, dim, streams))
def gather(tensors, dim=0, destination=None, *, out=None):
r"""Gathers tensors from multiple GPU devices.
Args:
tensors (Iterable[Tensor]): an iterable of tensors to gather.
Tensor sizes in all dimensions other than :attr:`dim` have to match.
dim (int, optional): a dimension along which the tensors will be
concatenated. Default: ``0``.
destination (torch.device, str, or int, optional): the output device.
Can be CPU or CUDA. Default: the current CUDA device.
out (Tensor, optional, keyword-only): the tensor to store gather result.
Its sizes must match those of :attr:`tensors`, except for :attr:`dim`,
where the size must equal ``sum(tensor.size(dim) for tensor in tensors)``.
Can be on CPU or CUDA.
.. note::
:attr:`destination` must not be specified when :attr:`out` is specified.
Returns:
- If :attr:`destination` is specified,
a tensor located on :attr:`destination` device, that is a result of
concatenating :attr:`tensors` along :attr:`dim`.
- If :attr:`out` is specified,
the :attr:`out` tensor, now containing results of concatenating
:attr:`tensors` along :attr:`dim`.
"""
tensors = [_handle_complex(t) for t in tensors]
if out is None:
if destination == -1:
warnings.warn(
'Using -1 to represent CPU tensor is deprecated. Please use a '
'device object or string instead, e.g., "cpu".')
destination = _get_device_index(destination, allow_cpu=True, optional=True)
return torch._C._gather(tensors, dim, destination)
else:
if destination is not None:
raise RuntimeError(
"'destination' must not be specified when 'out' is specified, but "
"got destination={}".format(destination))
return torch._C._gather_out(tensors, out, dim)
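# Illustrative sketch, not part of the original module: scattering a batch across the
# visible GPUs and gathering it back should round-trip the data. The sketch bails out
# when CUDA is unavailable; sizes are arbitrary.
def _demo_scatter_gather_roundtrip():
    if not torch.cuda.is_available():
        return
    devices = list(range(torch.cuda.device_count()))
    full = torch.randn(4 * len(devices), 3)
    chunks = scatter(full, devices)  # one chunk per device along dim 0
    restored = gather(chunks, dim=0, destination="cpu")
    assert torch.equal(restored, full)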
|
pytorch-master
|
torch/nn/parallel/comm.py
|
import torch
from ._functions import Scatter, Gather
import warnings
__all__ = ['scatter', 'scatter_kwargs', 'gather']
def is_namedtuple(obj):
# Check if type was created from collections.namedtuple or a typing.NamedTuple.
warnings.warn("is_namedtuple is deprecated, please use the python checks instead")
return _is_namedtuple(obj)
def _is_namedtuple(obj):
# Check if type was created from collections.namedtuple or a typing.NamedTuple.
return (
isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields")
)
def scatter(inputs, target_gpus, dim=0):
r"""
Slices tensors into approximately equal chunks and
distributes them across the given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
return Scatter.apply(target_gpus, None, dim, obj)
if _is_namedtuple(obj):
return [type(obj)(*args) for args in zip(*map(scatter_map, obj))]
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return [list(i) for i in zip(*map(scatter_map, obj))]
if isinstance(obj, dict) and len(obj) > 0:
return [type(obj)(i) for i in zip(*map(scatter_map, obj.items()))]
return [obj for targets in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
res = scatter_map(inputs)
finally:
scatter_map = None
return res
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
r"""Scatter with support for kwargs dictionary"""
inputs = scatter(inputs, target_gpus, dim) if inputs else []
kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend(() for _ in range(len(kwargs) - len(inputs)))
elif len(kwargs) < len(inputs):
kwargs.extend({} for _ in range(len(inputs) - len(kwargs)))
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
def gather(outputs, target_device, dim=0):
r"""
Gathers tensors from different GPUs on a specified device.
Use 'cpu' for CPU to avoid a deprecation warning.
"""
def gather_map(outputs):
out = outputs[0]
if isinstance(out, torch.Tensor):
return Gather.apply(target_device, dim, *outputs)
if out is None:
return None
if isinstance(out, dict):
if not all(len(out) == len(d) for d in outputs):
raise ValueError('All dicts must have the same number of keys')
return type(out)((k, gather_map([d[k] for d in outputs]))
for k in out)
if _is_namedtuple(out):
return type(out)._make(map(gather_map, zip(*outputs)))
return type(out)(map(gather_map, zip(*outputs)))
# Recursive function calls like this create reference cycles.
# Setting the function to None clears the refcycle.
try:
res = gather_map(outputs)
finally:
gather_map = None
return res
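# Illustrative sketch, not part of the original module: scatter chunks tensors along
# dim 0 and duplicates non-tensor objects once per target GPU. The sketch bails out
# when fewer than two CUDA devices are visible; shapes are arbitrary.
def _demo_scatter_kwargs():
    import torch
    if torch.cuda.device_count() < 2:
        return
    target_gpus = [0, 1]
    inputs = (torch.randn(8, 4), "a-non-tensor-flag")
    kwargs = {"mask": torch.ones(8, dtype=torch.bool)}
    scattered_inputs, scattered_kwargs = scatter_kwargs(inputs, kwargs, target_gpus, dim=0)
    assert len(scattered_inputs) == len(scattered_kwargs) == 2
    assert scattered_inputs[0][0].shape[0] == 4            # the tensor was chunked
    assert scattered_inputs[0][1] == "a-non-tensor-flag"   # the string was duplicated
    assert scattered_kwargs[0]["mask"].shape[0] == 4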
|
pytorch-master
|
torch/nn/parallel/scatter_gather.py
|
from . import comm
from torch._utils import _get_device_index
from collections import OrderedDict
def _is_script_module(module):
import torch.jit
return isinstance(module, torch.jit.ScriptModule)
def _is_script_method(module):
import torch.jit
return isinstance(module, torch._C.ScriptMethod)
def _init_script_module():
import torch.jit
return torch.jit.ScriptModule()
def _is_jit_enabled():
import torch.jit
return torch.jit._state._enabled
# Check if we can safely replicate the module.
# there are two types of module:
# 1. python modules
# 2. ScriptModule
#
# currently a module cannot be replicated properly if the descendants of
# any ScriptModule contain a python module (type 1 above)
def _replicatable_module(module, memo=None):
# module.modules() contains module itself as the first element
def descendant_modules(module):
gen = module.modules()
next(gen)
return gen
if not _is_jit_enabled():
return True
if memo is None:
memo = set()
# memoize visited modules
memo.add(module)
if _is_script_module(module):
memo.update(descendant_modules(module))
return all(_is_script_module(descendant) for
descendant in descendant_modules(module))
for child in module.children():
# since any unreplicatable module will cause the check to return
# False early, visited modules here can be safely ignored.
if child in memo:
continue
if not _replicatable_module(child, memo):
return False
return True
def _broadcast_coalesced_reshape(tensors, devices, detach=False):
from ._functions import Broadcast
if detach:
return comm.broadcast_coalesced(tensors, devices)
else:
# Use the autograd function to broadcast if not detach
if len(tensors) > 0:
tensor_copies = Broadcast.apply(devices, *tensors)
return [tensor_copies[i:i + len(tensors)]
for i in range(0, len(tensor_copies), len(tensors))]
else:
return []
def replicate(network, devices, detach=False):
if not _replicatable_module(network):
raise RuntimeError("Cannot replicate network where python modules are "
"childrens of ScriptModule")
if not devices:
return []
devices = [_get_device_index(x, True) for x in devices]
num_replicas = len(devices)
params = list(network.parameters())
param_indices = {param: idx for idx, param in enumerate(params)}
param_copies = _broadcast_coalesced_reshape(params, devices, detach)
buffers = list(network.buffers())
buffers_rg = []
buffers_not_rg = []
for buf in buffers:
if buf.requires_grad and not detach:
buffers_rg.append(buf)
else:
buffers_not_rg.append(buf)
buffer_indices_rg = {buf: idx for idx, buf in enumerate(buffers_rg)}
buffer_indices_not_rg = {buf: idx for idx, buf in enumerate(buffers_not_rg)}
buffer_copies_rg = _broadcast_coalesced_reshape(buffers_rg, devices, detach=detach)
buffer_copies_not_rg = _broadcast_coalesced_reshape(buffers_not_rg, devices, detach=True)
modules = list(network.modules())
module_copies = [[] for device in devices]
module_indices = {}
for i, module in enumerate(modules):
module_indices[module] = i
for j in range(num_replicas):
replica = module._replicate_for_data_parallel()
# This is a temporary fix for DDP. DDP needs to access the
# replicated model parameters. It used to do so through
# `module.parameters()`. The fix added in #33907 for DP stops the
# `parameters()` API from exposing the replicated parameters.
# Hence, we add a `_former_parameters` dict here to support DDP.
replica._former_parameters = OrderedDict()
module_copies[j].append(replica)
for i, module in enumerate(modules):
for key, child in module._modules.items():
if child is None:
for j in range(num_replicas):
replica = module_copies[j][i]
replica._modules[key] = None
else:
module_idx = module_indices[child]
for j in range(num_replicas):
replica = module_copies[j][i]
setattr(replica, key, module_copies[j][module_idx])
for key, param in module._parameters.items():
if param is None:
for j in range(num_replicas):
replica = module_copies[j][i]
replica._parameters[key] = None
else:
param_idx = param_indices[param]
for j in range(num_replicas):
replica = module_copies[j][i]
param = param_copies[j][param_idx]
# parameters in replicas are no longer leaves,
# so setattr them as non-parameter attributes
setattr(replica, key, param)
# expose the parameter for DDP
replica._former_parameters[key] = param
for key, buf in module._buffers.items():
if buf is None:
for j in range(num_replicas):
replica = module_copies[j][i]
replica._buffers[key] = None
else:
if buf.requires_grad and not detach:
buffer_copies = buffer_copies_rg
buffer_idx = buffer_indices_rg[buf]
else:
buffer_copies = buffer_copies_not_rg
buffer_idx = buffer_indices_not_rg[buf]
for j in range(num_replicas):
replica = module_copies[j][i]
setattr(replica, key, buffer_copies[j][buffer_idx])
return [module_copies[j][0] for j in range(num_replicas)]
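# Illustrative sketch, not part of the original module: replicating a small module
# onto two CUDA devices; each replica's parameters live on their own device. The
# sketch bails out when fewer than two GPUs are visible.
def _demo_replicate():
    import torch
    if torch.cuda.device_count() < 2:
        return
    net = torch.nn.Linear(4, 2).cuda(0)
    replicas = replicate(net, [0, 1])
    assert replicas[0].weight.device.index == 0
    assert replicas[1].weight.device.index == 1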
|
pytorch-master
|
torch/nn/parallel/replicate.py
|
import warnings
import torch
from . import comm
from torch.autograd import Function
from torch._utils import _get_device_index
from typing import List, Optional
class Broadcast(Function):
@staticmethod
def forward(ctx, target_gpus, *inputs):
assert all(i.device.type != 'cpu' for i in inputs), (
'Broadcast function not implemented for CPU tensors'
)
target_gpus = [_get_device_index(x, True) for x in target_gpus]
ctx.target_gpus = target_gpus
if len(inputs) == 0:
return tuple()
ctx.num_inputs = len(inputs)
ctx.input_device = inputs[0].get_device()
outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)
non_differentiables = []
for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]):
if not input_requires_grad:
for output in outputs:
non_differentiables.append(output[idx])
ctx.mark_non_differentiable(*non_differentiables)
return tuple([t for tensors in outputs for t in tensors])
@staticmethod
def backward(ctx, *grad_outputs):
return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs)
class ReduceAddCoalesced(Function):
@staticmethod
def forward(ctx, destination, num_inputs, *grads):
ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)]
grads_ = [grads[i:i + num_inputs]
for i in range(0, len(grads), num_inputs)]
return comm.reduce_add_coalesced(grads_, destination)
@staticmethod
def backward(ctx, *grad_outputs):
return (None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs)
class Gather(Function):
@staticmethod
def forward(ctx, target_device, dim, *inputs):
assert all(i.device.type != 'cpu' for i in inputs), (
'Gather function not implemented for CPU tensors'
)
if (target_device == 'cpu'):
ctx.target_device = 'cpu'
else:
target_device = _get_device_index(target_device, True)
ctx.target_device = target_device
ctx.dim = dim
ctx.input_gpus = tuple(i.get_device() for i in inputs)
if all(t.dim() == 0 for t in inputs) and dim == 0:
inputs = tuple(t.view(1) for t in inputs)
warnings.warn('Was asked to gather along dimension 0, but all '
'input tensors were scalars; will instead unsqueeze '
'and return a vector.')
ctx.unsqueezed_scalar = True
else:
ctx.unsqueezed_scalar = False
ctx.input_sizes = tuple(i.size(ctx.dim) for i in inputs)
return comm.gather(inputs, ctx.dim, ctx.target_device)
@staticmethod
def backward(ctx, grad_output):
scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output)
if ctx.unsqueezed_scalar:
scattered_grads = tuple(g[0] for g in scattered_grads)
return (None, None) + scattered_grads
class Scatter(Function):
@staticmethod
def forward(ctx, target_gpus, chunk_sizes, dim, input):
target_gpus = [_get_device_index(x, True) for x in target_gpus]
ctx.dim = dim
ctx.input_device = input.get_device() if input.device.type != "cpu" else -1
streams = None
if torch.cuda.is_available() and ctx.input_device == -1:
# Perform CPU to GPU copies in a background stream
streams = [_get_stream(device) for device in target_gpus]
outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams)
# Synchronize with the copy stream
if streams is not None:
for i, output in enumerate(outputs):
with torch.cuda.device(target_gpus[i]):
main_stream = torch.cuda.current_stream()
main_stream.wait_stream(streams[i])
output.record_stream(main_stream)
return outputs
@staticmethod
def backward(ctx, *grad_output):
return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output)
# background streams used for copying
_streams: Optional[List[Optional[torch.cuda.Stream]]] = None
def _get_stream(device: int):
"""Gets a background stream for copying between CPU and GPU"""
global _streams
if device == -1:
return None
if _streams is None:
_streams = [None] * torch.cuda.device_count()
if _streams[device] is None:
_streams[device] = torch.cuda.Stream(device)
return _streams[device]
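# Illustrative sketch, not part of the original module: Broadcast copies a tensor to
# several GPUs in the forward pass, and its backward reduce-adds the incoming
# gradients back onto the source device. The sketch bails out without two GPUs.
def _demo_broadcast_function():
    if torch.cuda.device_count() < 2:
        return
    x = torch.randn(3, requires_grad=True, device="cuda:0")
    copies = Broadcast.apply([0, 1], x)
    grads = [torch.ones_like(c) for c in copies]
    torch.autograd.backward(list(copies), grads)
    # Each copy contributes a gradient of ones, so the reduce-add yields twos.
    assert torch.equal(x.grad.cpu(), torch.full((3,), 2.0))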
|
pytorch-master
|
torch/nn/parallel/_functions.py
|
import operator
import torch
import warnings
from itertools import chain
from ..modules import Module
from .scatter_gather import scatter_kwargs, gather
from .replicate import replicate
from .parallel_apply import parallel_apply
from torch._utils import (
_get_all_device_indices,
_get_available_device_type,
_get_device_index,
_get_devices_properties
)
__all__ = ['DataParallel', 'data_parallel']
def _check_balance(device_ids):
imbalance_warn = """
There is an imbalance between your GPUs. You may want to exclude GPU {} which
has less than 75% of the memory or cores of GPU {}. You can do so by setting
the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES
environment variable."""
device_ids = [_get_device_index(x, True) for x in device_ids]
dev_props = _get_devices_properties(device_ids)
def warn_imbalance(get_prop):
values = [get_prop(props) for props in dev_props]
min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1))
max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1))
if min_val / max_val < 0.75:
warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]))
return True
return False
if warn_imbalance(lambda props: props.total_memory):
return
if warn_imbalance(lambda props: props.multi_processor_count):
return
class DataParallel(Module):
r"""Implements data parallelism at the module level.
This container parallelizes the application of the given :attr:`module` by
splitting the input across the specified devices by chunking in the batch
dimension (other objects will be copied once per device). In the forward
pass, the module is replicated on each device, and each replica handles a
portion of the input. During the backwards pass, gradients from each replica
are summed into the original module.
The batch size should be larger than the number of GPUs used.
.. warning::
It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
instead of this class, to do multi-GPU training, even if there is only a single
node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`.
Arbitrary positional and keyword inputs are allowed to be passed into
DataParallel, but some types are specially handled. Tensors will be
**scattered** on the dim specified (default 0). Tuple, list and dict types will
be shallow copied. The other types will be shared among different threads
and can be corrupted if written to in the model's forward pass.
The parallelized :attr:`module` must have its parameters and buffers on
``device_ids[0]`` before running this :class:`~torch.nn.DataParallel`
module.
.. warning::
In each forward, :attr:`module` is **replicated** on each device, so any
updates to the running module in ``forward`` will be lost. For example,
if :attr:`module` has a counter attribute that is incremented in each
``forward``, it will always stay at the initial value because the update
is done on the replicas which are destroyed after ``forward``. However,
:class:`~torch.nn.DataParallel` guarantees that the replica on
``device[0]`` will have its parameters and buffers sharing storage with
the base parallelized :attr:`module`. So **in-place** updates to the
parameters or buffers on ``device[0]`` will be recorded. E.g.,
:class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm`
rely on this behavior to update the buffers.
.. warning::
Forward and backward hooks defined on :attr:`module` and its submodules
will be invoked ``len(device_ids)`` times, each with inputs located on
a particular device. Particularly, the hooks are only guaranteed to be
executed in correct order with respect to operations on corresponding
devices. For example, it is not guaranteed that hooks set via
:meth:`~torch.nn.Module.register_forward_pre_hook` be executed before
`all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but
that each such hook be executed before the corresponding
:meth:`~torch.nn.Module.forward` call of that device.
.. warning::
When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in
:func:`forward`, this wrapper will return a vector of length equal to
number of devices used in data parallelism, containing the result from
each device.
.. note::
There is a subtlety in using the
``pack sequence -> recurrent network -> unpack sequence`` pattern in a
:class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for
details.
Args:
module (Module): module to be parallelized
device_ids (list of int or torch.device): CUDA devices (default: all devices)
output_device (int or torch.device): device location of output (default: device_ids[0])
Attributes:
module (Module): the module to be parallelized
Example::
>>> # xdoctest: +SKIP
>>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
>>> output = net(input_var) # input_var can be on any device, including CPU
"""
# TODO: update notes/cuda.rst when this class handles 8+ GPUs well
def __init__(self, module, device_ids=None, output_device=None, dim=0):
super(DataParallel, self).__init__()
torch._C._log_api_usage_once("torch.nn.parallel.DataParallel")
device_type = _get_available_device_type()
if device_type is None:
self.module = module
self.device_ids = []
return
if device_ids is None:
device_ids = _get_all_device_indices()
if output_device is None:
output_device = device_ids[0]
self.dim = dim
self.module = module
self.device_ids = [_get_device_index(x, True) for x in device_ids]
self.output_device = _get_device_index(output_device, True)
self.src_device_obj = torch.device(device_type, self.device_ids[0])
_check_balance(self.device_ids)
if len(self.device_ids) == 1:
self.module.to(self.src_device_obj)
def forward(self, *inputs, **kwargs):
with torch.autograd.profiler.record_function("DataParallel.forward"):
if not self.device_ids:
return self.module(*inputs, **kwargs)
for t in chain(self.module.parameters(), self.module.buffers()):
if t.device != self.src_device_obj:
raise RuntimeError("module must have its parameters and buffers "
"on device {} (device_ids[0]) but found one of "
"them on device: {}".format(self.src_device_obj, t.device))
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
# for forward function without any inputs, empty list and dict will be created
# so the module can be executed on one device which is the first one in device_ids
if not inputs and not kwargs:
inputs = ((),)
kwargs = ({},)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
def replicate(self, module, device_ids):
return replicate(module, device_ids, not torch.is_grad_enabled())
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def parallel_apply(self, replicas, inputs, kwargs):
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
r"""Evaluates module(input) in parallel across the GPUs given in device_ids.
This is the functional version of the DataParallel module.
Args:
module (Module): the module to evaluate in parallel
inputs (Tensor): inputs to the module
device_ids (list of int or torch.device): GPU ids on which to replicate module
output_device (int or torch.device): GPU location of the output. Use -1 to indicate the CPU.
(default: device_ids[0])
Returns:
a Tensor containing the result of module(input) located on
output_device
"""
if not isinstance(inputs, tuple):
inputs = (inputs,) if inputs is not None else ()
device_type = _get_available_device_type()
if device_ids is None:
device_ids = _get_all_device_indices()
if output_device is None:
output_device = device_ids[0]
device_ids = [_get_device_index(x, True) for x in device_ids]
output_device = _get_device_index(output_device, True)
src_device_obj = torch.device(device_type, device_ids[0])
for t in chain(module.parameters(), module.buffers()):
if t.device != src_device_obj:
raise RuntimeError("module must have its parameters and buffers "
"on device {} (device_ids[0]) but found one of "
"them on device: {}".format(src_device_obj, t.device))
inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
# for module without any inputs, empty list and dict will be created
# so the module can be executed on one device which is the first one in device_ids
if not inputs and not module_kwargs:
inputs = ((),)
module_kwargs = ({},)
if len(device_ids) == 1:
return module(*inputs[0], **module_kwargs[0])
used_device_ids = device_ids[:len(inputs)]
replicas = replicate(module, used_device_ids)
outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
return gather(outputs, output_device, dim)
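# Illustrative sketch, not part of the original module: the functional form wraps
# replicate / scatter / parallel_apply / gather into a single call. The sketch bails
# out when CUDA is unavailable; the model and batch shapes are arbitrary.
def _demo_data_parallel_functional():
    if not torch.cuda.is_available():
        return
    model = torch.nn.Linear(10, 5).cuda(0)
    batch = torch.randn(16, 10, device="cuda:0")
    out = data_parallel(model, batch)  # uses all visible GPUs by default
    assert out.shape == (16, 5) and out.device.index == 0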
|
pytorch-master
|
torch/nn/parallel/data_parallel.py
|
from .parallel_apply import parallel_apply
from .replicate import replicate
from .data_parallel import DataParallel, data_parallel
from .scatter_gather import scatter, gather
from .distributed import DistributedDataParallel
__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
'DataParallel', 'DistributedDataParallel']
def DistributedDataParallelCPU(*args, **kwargs):
import warnings
warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, "
"please use torch.nn.parallel.DistributedDataParallel instead.")
return DistributedDataParallel(*args, **kwargs)
|
pytorch-master
|
torch/nn/parallel/__init__.py
|
import sys
import copy
from dataclasses import dataclass
from typing import Callable, Any, Type
from enum import Enum, auto
import inspect
import itertools
import logging
import os
import warnings
from contextlib import contextmanager
import torch
import torch.distributed as dist
from torch.autograd import Function, Variable
from torch.distributed.algorithms.join import (
Join,
Joinable,
JoinHook,
)
from torch.utils._pytree import tree_flatten, tree_unflatten
RPC_AVAILABLE = False
if dist.is_available():
from torch.distributed.utils import (
_verify_param_shape_across_processes,
_sync_module_states,
_to_kwargs,
)
from torch.distributed.distributed_c10d import ReduceOp, _get_default_group
if torch.distributed.rpc.is_available():
RPC_AVAILABLE = True
from torch.distributed.rpc import RRef
from torch._utils import _get_device_index
from ..modules import Module
from ._replicated_tensor_ddp_utils import _ddp_with_replicated_tensor_enabled
from .scatter_gather import gather, is_namedtuple, scatter_kwargs # noqa: F401
__all__ = ['DistributedDataParallel']
logger = logging.getLogger(__name__)
def _tree_flatten_with_rref(output):
output_is_rref = RPC_AVAILABLE and isinstance(output, RRef)
if output_is_rref:
output_tensor_list, treespec = tree_flatten(output.local_value())
else:
output_tensor_list, treespec = tree_flatten(output)
# Need to return flattened tensors, spec to re-pack them, as well
# as if the return type was actually an RRef to reconstruct.
return output_tensor_list, treespec, output_is_rref
def _tree_unflatten_with_rref(output, treespec, output_is_rref):
output = tree_unflatten(output, treespec)
if output_is_rref:
output = RRef(output)
return output
def _find_tensors(obj):
r"""
Recursively find all tensors contained in the specified object.
"""
if RPC_AVAILABLE and isinstance(obj, RRef):
# If the current node is the owner of the RRef, unwrap it and try to
# find Tensors.
# TODO: Expand to remote RRefs.
if obj.is_owner():
return _find_tensors(obj.local_value())
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
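# Illustrative sketch, not part of the original module: _find_tensors walks lists,
# tuples, and dicts and yields every tensor it encounters; DDP uses it when
# inspecting forward outputs.
def _demo_find_tensors():
    a, b = torch.ones(1), torch.zeros(2)
    nested = {"x": [a, (b, "not a tensor")], "y": 3}
    found = list(_find_tensors(nested))
    assert len(found) == 2
    assert any(t is a for t in found) and any(t is b for t in found)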
def _dump_DDP_relevant_env_vars():
relevant_env_vars = [
"RANK",
"LOCAL_RANK",
"WORLD_SIZE",
"MASTER_PORT",
"MASTER_ADDR",
"CUDA_VISIBLE_DEVICES",
"GLOO_SOCKET_IFNAME",
"GLOO_DEVICE_TRANSPORT",
"NCCL_SOCKET_IFNAME",
"NCCL_BLOCKING_WAIT",
"NCCL_DEBUG",
"NCCL_DEBUG_SUBSYS",
"NCCL_IB_DISABLE",
# More NCCL env vars:
"NCCL_P2P_DISABLE",
"NCCL_P2P_LEVEL",
"NCCL_SHM_DISABLE",
"NCCL_SOCKET_NTHREADS",
"NCCL_NSOCKS_PERTHREAD",
"NCCL_BUFFSIZE",
"NCCL_NTHREADS",
"NCCL_RINGS",
"NCCL_MAX_NCHANNELS",
"NCCL_MIN_NCHANNELS",
"NCCL_CHECKS_DISABLE",
"NCCL_CHECK_POINTERS",
"NCCL_LAUNCH_MODE",
"NCCL_IB_HCA",
"NCCL_IB_TIMEOUT",
"NCCL_IB_RETRY_CNT",
"NCCL_IB_GID_INDEX",
"NCCL_IB_SL",
"NCCL_IB_TC",
"NCCL_IB_AR_THRESHOLD",
"NCCL_IB_CUDA_SUPPORT",
"NCCL_NET_GDR_LEVEL",
"NCCL_NET_GDR_READ",
"NCCL_SINGLE_RING_THRESHOLD",
"NCCL_LL_THRESHOLD",
"NCCL_TREE_THRESHOLD",
"NCCL_ALGO",
"NCCL_PROTO",
"NCCL_IGNORE_CPU_AFFINITY",
"NCCL_DEBUG_FILE",
"NCCL_COLLNET_ENABLE",
"NCCL_TOPO_FILE",
"NCCL_TOPO_DUMP_FILE",
"NCCL_ASYNC_ERROR_HANDLING",
]
formatted_output = ""
for var in relevant_env_vars:
value = os.environ[var] if var in os.environ else "N/A"
formatted_output += "env:%s=%s\n" % (var, value)
print(formatted_output)
class _BufferCommHookLocation(Enum):
PRE_FORWARD = auto()
POST_FORWARD = auto()
@dataclass
class _BufferCommHook:
buffer_comm_hook: Callable
buffer_comm_hook_state: Any
buffer_comm_hook_location: _BufferCommHookLocation
# Add a DDPSink to run various functions when the backward pass starts, such as
# queueing a callback for the outermost backward/graph task;
# this helps ensure the callback is fired after all gradient
# computation is completed.
class _DDPSink(Function):
@staticmethod
def forward(ctx, reducer, state_dict, *inputs):
# set_materialize_grads(False) will ensure that None gradients stay as
# None and are not filled with zeros.
ctx.set_materialize_grads(False)
ctx.reducer = reducer
ctx.state_dict = state_dict
ret = tuple(
inp.clone()
if isinstance(inp, torch.Tensor)
else inp
for inp in inputs
)
return ret
@staticmethod
def backward(ctx, *grad_outputs):
state_dict = ctx.state_dict
# Enqueue delay allreduce for static graph training on the first
# iteration.
if ctx.state_dict['static_graph'] and ctx.state_dict['num_iterations'] == 1:
Variable._execution_engine.queue_callback(ctx.reducer._delay_all_reduce)
return (None, None, *grad_outputs)
class _DDPJoinHook(JoinHook):
def __init__(self, ddp, divide_by_initial_world_size):
"""
Sets config variables for internal usage.
"""
assert isinstance(ddp, DistributedDataParallel), (
"DDP join hook requires passing in a DistributedDataParallel "
"instance as the state"
)
ddp.logger._set_uneven_input_join()
self.ddp = ddp
self.ddp._divide_by_initial_world_size = divide_by_initial_world_size
super().__init__()
def main_hook(self):
"""
Shadows the DDP collective communication operations in the forward and
backward passes.
"""
ddp = self.ddp
# Buckets are rebuilt only once during a training period
ddp.reducer._rebuild_buckets()
# Schedule a broadcast if we are syncing module buffers in the
# forward pass
# TODO: make DDP uneven inputs context manager support buffer
# comm hook (https://github.com/pytorch/pytorch/issues/65436)
ddp._check_and_sync_module_buffers()
# Check if need to sync in the backward pass
work = ddp._check_global_requires_backward_grad_sync(is_joined_rank=True)
work.wait()
should_sync_backwards = work.result()[0].item() != 0
# Forward parameter sync is disabled in the next iteration if we
# are skipping gradient sync this iteration, so set
# `require_forward_param_sync` accordingly
ddp.require_forward_param_sync = should_sync_backwards
if not should_sync_backwards:
return
# Schedule one allreduce per gradient bucket to match the backward
# pass allreduce
ddp._match_all_reduce_for_bwd_pass()
# Check if we need to allreduce locally unused parameters
if ddp.find_unused_parameters:
ddp._match_unused_params_allreduce()
# Rebuilt parameters are pushed only once during a training period
ddp.reducer._push_all_rebuilt_params()
def post_hook(self, is_last_joiner: bool):
"""
Syncs the final model to ensure that the model is the same across all
processes.
"""
self.ddp._sync_final_model(is_last_joiner)
class DistributedDataParallel(Module, Joinable):
r"""Implements distributed data parallelism that is based on
``torch.distributed`` package at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. The module is replicated on each machine and each device, and
each such replica handles a portion of the input. During the backwards
pass, gradients from each node are averaged.
The batch size should be larger than the number of GPUs used locally.
See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.
The same constraints on input as in :class:`torch.nn.DataParallel` apply.
Creation of this class requires that ``torch.distributed`` be already
initialized, by calling :func:`torch.distributed.init_process_group`.
``DistributedDataParallel`` is proven to be significantly faster than
:class:`torch.nn.DataParallel` for single-node multi-GPU data
parallel training.
To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn
up ``N`` processes, ensuring that each process exclusively works on a single
GPU from 0 to N-1. This can be done by either setting
``CUDA_VISIBLE_DEVICES`` for every process or by calling:
>>> # xdoctest: +SKIP("undefined variables")
>>> torch.cuda.set_device(i)
where i is from 0 to N-1. In each process, you should refer to the following
to construct this module:
>>> # xdoctest: +SKIP("undefined variables")
>>> torch.distributed.init_process_group(
>>> backend='nccl', world_size=N, init_method='...'
>>> )
>>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)
In order to spawn up multiple processes per node, you can use either
``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.
.. note::
Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__
for a brief introduction to all features related to distributed training.
.. note::
``DistributedDataParallel`` can be used in conjunction with
:class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce
per-rank optimizer states memory footprint. Please refer to
`ZeroRedundancyOptimizer recipe <https://pytorch.org/tutorials/recipes/zero_redundancy_optimizer.html>`__
for more details.
.. note:: The ``nccl`` backend is currently the fastest and the most highly recommended
backend when using GPUs. This applies to both single-node and
multi-node distributed training.
.. note:: This module also supports mixed-precision distributed training.
This means that your model can have different types of parameters such
as mixed ``fp16`` and ``fp32`` types; the gradient reduction on these
mixed types of parameters will just work fine.
.. note:: If you use ``torch.save`` on one process to checkpoint the module,
and ``torch.load`` on some other processes to recover it, make sure that
``map_location`` is configured properly for every process. Without
``map_location``, ``torch.load`` would recover the module to devices
where the module was saved from.
.. note:: When a model is trained on ``M`` nodes with ``batch=N``, the
gradient will be ``M`` times smaller when compared to the same model
trained on a single node with ``batch=M*N`` if the loss is summed (NOT
averaged as usual) across instances in a batch (because the gradients
between different nodes are averaged). You should take this into
consideration when you want to obtain a mathematically equivalent
training process compared to the local training counterpart. But in most
cases, you can just treat a DistributedDataParallel wrapped model, a
DataParallel wrapped model and an ordinary model on a single GPU as the
same (E.g. using the same learning rate for equivalent batch size).
.. note::
Parameters are never broadcast between processes. The module performs
an all-reduce step on gradients and assumes that they will be modified
by the optimizer in all processes in the same way. Buffers
        (e.g. BatchNorm stats) are broadcast from the module in the process of
        rank 0 to all other replicas in the system in every iteration.
.. note::
If you are using DistributedDataParallel in conjunction with the
:ref:`distributed-rpc-framework`, you should always use
:meth:`torch.distributed.autograd.backward` to compute gradients and
:class:`torch.distributed.optim.DistributedOptimizer` for optimizing
parameters.
.. note::
DistributedDataParallel currently offers limited support for gradient
checkpointing with :meth:`torch.utils.checkpoint`. DDP will work as
expected when there are no unused parameters in the model and each layer
is checkpointed at most once (make sure you are not passing
`find_unused_parameters=True` to DDP). We currently do not support the
        case where a layer is checkpointed multiple times, or when there are
        unused parameters in the checkpointed model.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch.distributed.autograd as dist_autograd
>>> from torch.nn.parallel import DistributedDataParallel as DDP
>>> import torch
>>> from torch import optim
>>> from torch.distributed.optim import DistributedOptimizer
>>> import torch.distributed.rpc as rpc
>>> from torch.distributed.rpc import RRef
>>>
>>> t1 = torch.rand((3, 3), requires_grad=True)
>>> t2 = torch.rand((3, 3), requires_grad=True)
>>> rref = rpc.remote("worker1", torch.add, args=(t1, t2))
>>> ddp_model = DDP(my_model)
>>>
>>> # Setup optimizer
>>> optimizer_params = [rref]
>>> for param in ddp_model.parameters():
>>> optimizer_params.append(RRef(param))
>>>
>>> dist_optim = DistributedOptimizer(
>>> optim.SGD,
>>> optimizer_params,
>>> lr=0.05,
>>> )
>>>
>>> with dist_autograd.context() as context_id:
>>> pred = ddp_model(rref.to_here())
>>> loss = loss_func(pred, target)
>>> dist_autograd.backward(context_id, [loss])
>>> dist_optim.step(context_id)
.. note::
To let a non-DDP model load a state dict from a DDP model,
:meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present`
needs to be applied to strip the prefix "module." in the DDP state dict before loading.
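        For example (a minimal sketch; ``ddp_model`` and ``non_ddp_model`` are
        assumed to exist and to share the same architecture)::
            >>> # xdoctest: +SKIP("undefined variables")
            >>> from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
            >>> state_dict = ddp_model.state_dict()
            >>> consume_prefix_in_state_dict_if_present(state_dict, "module.")
            >>> non_ddp_model.load_state_dict(state_dict)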
.. warning::
Constructor, forward method, and differentiation of the output (or a
function of the output of this module) are distributed synchronization
points. Take that into account in case different processes might be
executing different code.
.. warning::
This module assumes all parameters are registered in the model by the
time it is created. No parameters should be added nor removed later.
Same applies to buffers.
.. warning::
        This module assumes that all parameters are registered in the model of
        each distributed process in the same order. The module itself will
conduct gradient ``allreduce`` following the reverse order of the
registered parameters of the model. In other words, it is users'
responsibility to ensure that each distributed process has the exact
same model and thus the exact same parameter registration order.
.. warning::
This module allows parameters with non-rowmajor-contiguous strides.
For example, your model may contain some parameters whose
:class:`torch.memory_format` is ``torch.contiguous_format``
and others whose format is ``torch.channels_last``. However,
corresponding parameters in different processes must have the
same strides.
.. warning::
This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
only work if gradients are to be accumulated in ``.grad`` attributes of
parameters).
.. warning::
If you plan on using this module with a ``nccl`` backend or a ``gloo``
backend (that uses Infiniband), together with a DataLoader that uses
multiple workers, please change the multiprocessing start method to
``forkserver`` (Python 3 only) or ``spawn``. Unfortunately
Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will
likely experience deadlocks if you don't change this setting.
.. warning::
You should never try to change your model's parameters after wrapping
        up your model with ``DistributedDataParallel``, because the constructor
        of ``DistributedDataParallel`` registers the additional gradient
        reduction functions on all the parameters of the model itself at the
        time of construction. If you change the model's parameters afterwards,
        the gradient reduction functions will no longer match the correct set
        of parameters.
.. warning::
Using ``DistributedDataParallel`` in conjunction with the
:ref:`distributed-rpc-framework` is experimental and subject to change.
Args:
module (Module): module to be parallelized
device_ids (list of int or torch.device): CUDA devices.
1) For single-device modules, ``device_ids`` can
contain exactly one device id, which represents the only
CUDA device where the input module corresponding to this process resides.
Alternatively, ``device_ids`` can also be ``None``.
2) For multi-device modules and CPU modules,
``device_ids`` must be ``None``.
When ``device_ids`` is ``None`` for both cases,
both the input data for the forward pass and the actual module
must be placed on the correct device.
(default: ``None``)
output_device (int or torch.device): Device location of output for
single-device CUDA modules. For multi-device modules and
CPU modules, it must be ``None``, and the module itself
dictates the output location. (default: ``device_ids[0]``
for single-device modules)
broadcast_buffers (bool): Flag that enables syncing (broadcasting)
buffers of the module at beginning of the ``forward``
function. (default: ``True``)
process_group: The process group to be used for distributed data
all-reduction. If ``None``, the default process group, which
is created by :func:`torch.distributed.init_process_group`,
will be used. (default: ``None``)
bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into
multiple buckets so that gradient reduction of each
bucket can potentially overlap with backward computation.
:attr:`bucket_cap_mb` controls the bucket size in
MegaBytes (MB). (default: 25)
find_unused_parameters (bool): Traverse the autograd graph from all
tensors contained in the return value of the
wrapped module's ``forward`` function. Parameters
that don't receive gradients as part of this
graph are preemptively marked as being ready to
be reduced. In addition, parameters that may have
been used in the wrapped module's ``forward``
function but were not part of loss computation and
thus would also not receive gradients are
preemptively marked as ready to be reduced.
(default: ``False``)
check_reduction: This argument is deprecated.
gradient_as_bucket_view (bool): When set to ``True``, gradients will be views
pointing to different offsets of ``allreduce`` communication
buckets. This can reduce peak memory usage, where the
saved memory size will be equal to the total gradients
size. Moreover, it avoids the overhead of copying between
gradients and ``allreduce`` communication buckets. When
gradients are views, ``detach_()`` cannot be called on the
gradients. If hitting such errors, please fix it by
referring to the :meth:`~torch.optim.Optimizer.zero_grad`
function in ``torch/optim/optimizer.py`` as a solution.
Note that gradients will be views after first iteration, so
the peak memory saving should be checked after first iteration.
static_graph (bool): When set to ``True``, DDP knows the trained graph is
static. Static graph means 1) The set of used and unused
parameters will not change during the whole training loop; in
this case, it does not matter whether users set
``find_unused_parameters = True`` or not. 2) How the graph is trained
will not change during the whole training loop (meaning there is
no control flow depending on iterations).
                             When static_graph is set to be ``True``, DDP will support cases that
                             could not be supported in the past:
                             1) Reentrant backwards.
                             2) Activation checkpointing multiple times.
                             3) Activation checkpointing when the model has unused parameters.
                             4) Model parameters that are outside of the forward function.
                             5) Potentially improved performance when there are unused parameters,
                             as DDP will not search the graph in each iteration to detect unused
                             parameters when static_graph is set to ``True``.
                             One way to check whether you can set static_graph to ``True`` is to
                             check the ddp logging data at the end of your previous model training;
                             if ``ddp_logging_data.get("can_set_static_graph") == True``, you can
                             most likely set ``static_graph = True`` as well.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> model_DDP = torch.nn.parallel.DistributedDataParallel(model)
>>> # Training loop
>>> ...
>>> ddp_logging_data = model_DDP._get_ddp_logging_data()
>>> static_graph = ddp_logging_data.get("can_set_static_graph")
Attributes:
module (Module): the module to be parallelized.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
>>> net = torch.nn.parallel.DistributedDataParallel(model)
"""
def __init__(
self,
module,
device_ids=None,
output_device=None,
dim=0,
broadcast_buffers=True,
process_group=None,
bucket_cap_mb=25,
find_unused_parameters=False,
check_reduction=False,
gradient_as_bucket_view=False,
static_graph=False,
):
super(DistributedDataParallel, self).__init__()
Joinable.__init__(self)
self.logger = None
if not any((p.requires_grad for p in module.parameters())):
self._log_and_throw(
RuntimeError,
"DistributedDataParallel is not needed when a module "
"doesn't have any parameter that requires a gradient.",
)
if device_ids is not None and len(device_ids) > 1:
self._log_and_throw(
ValueError, "device_ids can only be None or contain a single element."
)
self.is_multi_device_module = len({p.device for p in module.parameters()}) > 1
distinct_device_types = {p.device.type for p in module.parameters()}
if len(distinct_device_types) != 1:
self._log_and_throw(
ValueError,
"DistributedDataParallel's input module must be on "
"the same type of devices, but input module parameters locate in {}.".format(
distinct_device_types
),
)
self.device_type = list(distinct_device_types)[0]
if (
device_ids is None
or len(device_ids) == 0 # For backward compatibility.
or self.device_type == "cpu"
or self.is_multi_device_module
):
if device_ids or output_device:
self._log_and_throw(
ValueError,
"DistributedDataParallel device_ids and output_device arguments "
"only work with single-device/multiple-device GPU modules or CPU modules, "
"but got device_ids {}, output_device {}, and module parameters {}.".format(
device_ids,
output_device,
{p.device for p in module.parameters()},
),
)
self.device_ids = None
self.output_device = None
else:
self.device_ids = [_get_device_index(x, True) for x in device_ids]
if output_device is None:
output_device = device_ids[0]
self.output_device = _get_device_index(output_device, True)
if process_group is None:
self.process_group = _get_default_group()
else:
self.process_group = process_group
self.static_graph = False
self.dim = dim
self.module = module
self.device = list(self.module.parameters())[0].device
self.broadcast_buffers = broadcast_buffers
self.find_unused_parameters = find_unused_parameters
self.require_backward_grad_sync = True
self.require_forward_param_sync = True
self.gradient_as_bucket_view = gradient_as_bucket_view
if hasattr(module, "_ddp_params_and_buffers_to_ignore"):
self.parameters_to_ignore = module._ddp_params_and_buffers_to_ignore
else:
self.parameters_to_ignore = []
self._use_replicated_tensor_module = _ddp_with_replicated_tensor_enabled()
self._build_replicated_tensor_module()
if check_reduction:
# This argument is no longer used since the reducer
# will ensure reduction completes even if some parameters
# do not receive gradients.
warnings.warn(
"The `check_reduction` argument in `DistributedDataParallel` "
"module is deprecated. Please avoid using it."
)
# Check that a module does not have Uninitialized parameters
for param in module.parameters():
if isinstance(param, torch.nn.parameter.UninitializedParameter):
self._log_and_throw(
RuntimeError,
"Modules with uninitialized parameters can't be used with `DistributedDataParallel`. "
"Run a dummy forward pass to correctly initialize the modules",
)
# used for intra-node param sync and inter-node sync as well
self.broadcast_bucket_size = int(250 * 1024 * 1024)
# reduction bucket size
self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)
# Whether to perform input tensor CPU to GPU copies on a side-stream
self.use_side_stream_for_tensor_copies = (
os.environ.get("PYTORCH_DDP_USE_SIDE_STREAM", "1") == "1"
)
# Build parameters for reducer.
parameters, expect_sparse_gradient = self._build_params_for_reducer()
# Verify model equivalence.
_verify_param_shape_across_processes(self.process_group, parameters)
# Sync params and buffers. Ensures all DDP models start off at the same value.
_sync_module_states(
module=self.module,
process_group=self.process_group,
broadcast_bucket_size=self.broadcast_bucket_size,
src=0,
params_and_buffers_to_ignore=self.parameters_to_ignore,
)
# In debug mode, build a mapping of parameter index -> parameter.
param_to_name_mapping = self._build_debug_param_to_name_mapping(parameters)
# Builds reducer.
self._ddp_init_helper(
parameters, expect_sparse_gradient, param_to_name_mapping, static_graph
)
self._has_rebuilt_buckets = False
if static_graph:
self._set_static_graph()
def _build_replicated_tensor_module(self):
if self._use_replicated_tensor_module:
# Create a module with ReplicatedTensor without copying tensors. Avoid
# registering '_replicated_tensor_module' as a submodule by directly
# adding to self.__dict__.
from ._replicated_tensor_ddp_interop import _replicate_module
self.__dict__['_replicated_tensor_module'] = _replicate_module(self.module, self.process_group)
def _log_and_throw(self, err_type, err_msg):
if self.logger is not None:
self.logger.set_error_and_log(f"{str(err_type)}: {err_msg}")
raise err_type(err_msg)
def _ddp_init_helper(
self, parameters, expect_sparse_gradient, param_to_name_mapping,
static_graph
):
"""
Initialization helper function that does the following:
(1) bucketing the parameters for reductions
(2) resetting the bucketing states
(3) registering the grad hooks
(4) Logging construction-time DDP logging data
(5) passing a handle of DDP to SyncBatchNorm Layer
"""
self.num_iterations = 0
        # Note that the parameter order is not necessarily the order in which
        # they are used, especially in models with control flow.
        #
        # Moreover, since parameters are not presented in the real execution
        # order, if a model also happens to
        # 1) have other collective comm ops in its backward graph, and
        # 2) have unused parameters on a subset of ranks of the whole world,
        # bucketing could insert an ALL-REDUCE comm op too early on the rank
        # with the unused parameter, unexpectedly matching up with other
        # collective comm ops on other ranks.
#
# In order to handle this corner case, when the parameters are not in the real execution order,
# we don't do bucketing, thus only one ALL-REDUCE is inserted after all the gradients
# of the whole graph are computed.
#
# Notice, here we only disable bucketing for the first iteration.
# After the first iteration, it's OK to rebuild buckets,
# because "bucket rebuild" bucketizes parameters based on its real execution order in backward graph.
# Can remove this branching once #73732 is landed.
if static_graph is True or self.find_unused_parameters is False:
bucket_size_limits = [sys.maxsize]
else:
bucket_size_limits = [dist._DEFAULT_FIRST_BUCKET_BYTES, self.bucket_bytes_cap]
bucket_indices, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
parameters,
bucket_size_limits,
expect_sparse_gradient,
)
# Note: reverse list of buckets because we want to approximate the
# order in which their gradients are produced, and assume they
# are used in the forward pass in the order they are defined.
self.reducer = dist.Reducer(
parameters,
list(reversed(bucket_indices)),
list(reversed(per_bucket_size_limits)),
self.process_group,
expect_sparse_gradient,
# The bucket size limit is specified in the constructor.
# Additionally, we allow for a single small bucket for parameters
# that are defined first, such that their gradients don't spill into
# a much larger bucket, adding unnecessary latency after gradient
# computation finishes. Experiments showed 1MB is a reasonable value.
self.bucket_bytes_cap,
self.find_unused_parameters,
self.gradient_as_bucket_view,
param_to_name_mapping,
# User can set dist._DEFAULT_FIRST_BUCKET_BYTES to tune DDP first
# bucket.
dist._DEFAULT_FIRST_BUCKET_BYTES
)
self.logger = dist.Logger(self.reducer)
# Set as a weak reference to avoid reference cycle between
# logger and reducer.
self.reducer.set_logger(self.logger)
has_sync_bn = False
for submodule in self.module.modules():
if isinstance(submodule, torch.nn.SyncBatchNorm):
has_sync_bn = True
break
# Set logging data that can be got during construction time.
self.logger.set_construction_data_and_log(
self.module.__class__.__name__,
[] if self.device_ids is None else self.device_ids,
-1 if self.output_device is None else self.output_device,
self.broadcast_buffers,
has_sync_bn,
static_graph,
)
# passing a handle to torch.nn.SyncBatchNorm layer
self._passing_sync_batchnorm_handle(self.module)
def __getstate__(self):
self._check_default_group()
attrs = copy.copy(self.__dict__)
del attrs["process_group"]
del attrs["reducer"]
del attrs["logger"]
if self._use_replicated_tensor_module:
del attrs["_replicated_tensor_module"]
return attrs
def __setstate__(self, state):
# If serializable, then the process group should be the default one
self.process_group = _get_default_group()
super(DistributedDataParallel, self).__setstate__(state)
self._build_replicated_tensor_module()
self.__dict__.setdefault("require_forward_param_sync", True)
self.__dict__.setdefault("require_backward_grad_sync", True)
parameters, expect_sparse_gradient = self._build_params_for_reducer()
# In debug mode, build a mapping of parameter index -> parameter.
param_to_name_mapping = self._build_debug_param_to_name_mapping(parameters)
# Builds reducer.
self._ddp_init_helper(
parameters, expect_sparse_gradient, param_to_name_mapping, self.static_graph
)
if self.static_graph:
self.reducer._set_static_graph()
self.logger._set_static_graph()
def _build_params_for_reducer(self):
# Build tuple of (module, parameter) for all parameters that require grads.
modules_and_parameters = [
(module, parameter)
for module_name, module in self.module.named_modules()
for parameter in [
param
# Note that we access module.named_parameters instead of
# parameters(module). parameters(module) is only needed in the
# single-process multi device case, where it accesses replicated
# parameters through _former_parameters.
for param_name, param in module.named_parameters(recurse=False)
if param.requires_grad
and f"{module_name}.{param_name}" not in self.parameters_to_ignore
]
]
# Deduplicate any parameters that might be shared across child modules.
memo = set()
modules_and_parameters = [
# "p not in memo" is the deduplication check.
# "not memo.add(p)" is always True, and it's only there to cause "add(p)" if needed.
(m, p) for m, p in modules_and_parameters
if p not in memo and not memo.add(p)
]
# Build list of parameters.
parameters = list(parameter for _, parameter in modules_and_parameters)
# Checks if a module will produce a sparse gradient.
def produces_sparse_gradient(module):
if isinstance(module, torch.nn.Embedding) or isinstance(
module, torch.nn.EmbeddingBag
):
return module.sparse
return False
# Build list of booleans indicating whether or not to expect sparse
# gradients for the corresponding parameters.
expect_sparse_gradient = list(produces_sparse_gradient(module) for module, _ in modules_and_parameters)
self._assign_modules_buffers()
return parameters, expect_sparse_gradient
def _assign_modules_buffers(self):
"""
Assigns module buffers to self.modules_buffers which are then used to
broadcast across ranks when broadcast_buffers=True. Note that this
must be called every time buffers need to be synced because buffers can
be reassigned by user module,
see https://github.com/pytorch/pytorch/issues/63916.
"""
# Collect buffers for modules, filtering out buffers that should be ignored.
named_module_buffers = [
(buffer, buffer_name)
for buffer_name, buffer in self.module.named_buffers()
if buffer_name not in self.parameters_to_ignore
]
self.modules_buffers = [
buffer
for (buffer, buffer_name) in named_module_buffers
]
# Dict[str, tensor] representing module buffers not ignored by DDP.
self.named_module_buffers = {
buffer_name: buffer for (buffer, buffer_name) in named_module_buffers
}
def _build_debug_param_to_name_mapping(self, parameters):
if dist.get_debug_level() == dist.DebugLevel.OFF:
return {}
param_to_param_index = {parameters[i]: i for i in range(len(parameters))}
param_set = set(parameters)
param_index_to_param_fqn = {}
for module_name, module in self.module.named_modules():
for param_name, param in module.named_parameters(recurse=False):
fqn = f"{module_name}.{param_name}"
# Bypass ignored parameters since those are not reduced by DDP
# to begin with.
if fqn not in self.parameters_to_ignore and param.requires_grad:
if param not in param_set:
self._log_and_throw(
ValueError,
f"Param with name {fqn} found in module parameters, but not DDP parameters."
" This indicates a bug in DDP, please report an issue to PyTorch.",
)
param_index = param_to_param_index[param]
param_index_to_param_fqn[param_index] = fqn
# Ensure we covered all parameters
if len(param_set) != len(param_index_to_param_fqn):
self._log_and_throw(
ValueError,
(
"Expected param to name mapping to cover all parameters, but"
f" got conflicting lengths: {len(param_set)} vs "
f"{len(param_index_to_param_fqn)}. This indicates a bug in DDP"
", please report an issue to PyTorch."
),
)
return param_index_to_param_fqn
def _get_parameters(self, m, recurse=True):
"""
Returns a generator of module parameters
"""
def model_parameters(m):
ps = (
m._former_parameters.values()
if hasattr(m, "_former_parameters")
else m.parameters(recurse=False)
)
for p in ps:
yield p
for m in m.modules() if recurse else [m]:
for p in model_parameters(m):
yield p
def _check_default_group(self):
pickle_not_supported = False
try:
if self.process_group != _get_default_group():
pickle_not_supported = True
except RuntimeError:
pickle_not_supported = True
if pickle_not_supported:
self._log_and_throw(
RuntimeError,
"DDP Pickling/Unpickling are only supported "
"when using DDP with the default process "
"group. That is, when you have called "
"init_process_group and have not passed "
"process_group argument to DDP constructor",
)
@contextmanager
def no_sync(self):
r"""
A context manager to disable gradient synchronizations across DDP
processes. Within this context, gradients will be accumulated on module
variables, which will later be synchronized in the first
forward-backward pass exiting the context.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)
>>> with ddp.no_sync():
>>> for input in inputs:
>>> ddp(input).backward() # no synchronization, accumulate grads
>>> ddp(another_input).backward() # synchronize grads
"""
old_require_backward_grad_sync = self.require_backward_grad_sync
self.require_backward_grad_sync = False
try:
yield
finally:
self.require_backward_grad_sync = old_require_backward_grad_sync
def _run_ddp_forward(self, *inputs, **kwargs):
module_to_run = self._replicated_tensor_module if self._use_replicated_tensor_module else self.module
if self.device_ids:
inputs, kwargs = _to_kwargs(
inputs,
kwargs,
self.device_ids[0],
self.use_side_stream_for_tensor_copies
)
return module_to_run(*inputs[0], **kwargs[0])
else:
return module_to_run(*inputs, **kwargs)
def forward(self, *inputs, **kwargs):
with torch.autograd.profiler.record_function("DistributedDataParallel.forward"):
if torch.is_grad_enabled() and self.require_backward_grad_sync:
self.logger.set_runtime_stats_and_log()
self.num_iterations += 1
self.reducer.prepare_for_forward()
# Notify the join context that this process has not joined, if
# needed
work = Join.notify_join_context(self)
if work:
self.reducer._set_forward_pass_work_handle(
work, self._divide_by_initial_world_size
)
            # Call _rebuild_buckets before forward computation, since it may
            # allocate new buckets before deallocating old buckets inside
            # _rebuild_buckets. To save peak memory usage, call _rebuild_buckets
            # before the peak memory usage increases during forward computation.
            # This should be called only once during the whole training period.
if torch.is_grad_enabled() and self.reducer._rebuild_buckets():
logger.info("Reducer buckets have been rebuilt in this iteration.")
self._has_rebuilt_buckets = True
# sync params according to location (before/after forward) user
# specified as part of hook, if hook was specified.
buffer_hook_registered = hasattr(self, 'buffer_hook')
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
if self._join_config.enable:
# Notify joined ranks whether they should sync in backwards pass or not.
self._check_global_requires_backward_grad_sync(is_joined_rank=False)
output = self._run_ddp_forward(*inputs, **kwargs)
# sync params according to location (before/after forward) user
# specified as part of hook, if hook was specified.
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if torch.is_grad_enabled() and self.require_backward_grad_sync:
self.require_forward_param_sync = True
# We'll return the output object verbatim since it is a freeform
# object. We need to find any tensors in this object, though,
# because we need to figure out which parameters were used during
# this forward pass, to ensure we short circuit reduction for any
# unused parameters. Only if `find_unused_parameters` is set.
if self.find_unused_parameters and not self.static_graph:
# Do not need to populate this for static graph.
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
self.require_forward_param_sync = False
# TODO: DDPSink is currently enabled for unused parameter detection and
# static graph training for first iteration.
if (self.find_unused_parameters and not self.static_graph) or (
self.static_graph and self.num_iterations == 1
):
state_dict = {
'static_graph': self.static_graph,
'num_iterations': self.num_iterations,
}
output_tensor_list, treespec, output_is_rref = _tree_flatten_with_rref(
output
)
output_placeholders = [None for _ in range(len(output_tensor_list))]
# Do not touch tensors that have no grad_fn, which can cause issues
# such as https://github.com/pytorch/pytorch/issues/60733
for i, output in enumerate(output_tensor_list):
if torch.is_tensor(output) and output.grad_fn is None:
output_placeholders[i] = output
# When find_unused_parameters=True, makes tensors which require grad
# run through the DDPSink backward pass. When not all outputs are
# used in loss, this makes those corresponding tensors receive
# undefined gradient which the reducer then handles to ensure
# param.grad field is not touched and we don't error out.
passthrough_tensor_list = _DDPSink.apply(
self.reducer,
state_dict,
*output_tensor_list,
)
for i in range(len(output_placeholders)):
if output_placeholders[i] is None:
output_placeholders[i] = passthrough_tensor_list[i]
# Reconstruct output data structure.
output = _tree_unflatten_with_rref(
output_placeholders, treespec, output_is_rref
)
return output
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def to_kwargs(self, inputs, kwargs, device_id):
# Kept for BC
return _to_kwargs(
inputs, kwargs, device_id, self.use_side_stream_for_tensor_copies
)
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
def train(self, mode=True):
super(DistributedDataParallel, self).train(mode)
if self._use_replicated_tensor_module:
self._replicated_tensor_module.train(mode)
return self
    # When running in join mode, schedules an allreduce to notify joined ranks
    # of whether backwards pass synchronization will run this iteration or not.
def _check_global_requires_backward_grad_sync(self, is_joined_rank):
if not is_joined_rank and self.require_backward_grad_sync:
requires_sync_tensor = torch.ones(1, device=self.device)
else:
requires_sync_tensor = torch.zeros(1, device=self.device)
work = dist.all_reduce(
requires_sync_tensor, group=self.process_group, async_op=True
)
return work
# When running in join mode, checks and performs sync of module buffers if
# the models have buffers that should be synchronized in the forward pass.
def _check_and_sync_module_buffers(self):
if self._check_sync_bufs_pre_fwd():
authoritative_rank = self._find_common_rank(self._distributed_rank, False)
self._sync_module_buffers(authoritative_rank)
    # When running in join mode, agrees upon a common rank and broadcasts model
    # parameters to all other ranks.
def _sync_final_model(self, is_last_joiner):
# Agree upon the process that will be the authoritative model copy.
# The current rank is a candidate for being the authoritative copy if
# is_last_joiner=True. We break ties via picking the larger rank.
self._authoritative_rank = self._find_common_rank(
self._distributed_rank, is_last_joiner
)
_sync_module_states(
module=self.module,
process_group=self.process_group,
broadcast_bucket_size=self.broadcast_bucket_size,
src=self._authoritative_rank,
params_and_buffers_to_ignore=self.parameters_to_ignore
)
# Schedule comm ops to match those scheduled in the reducer's backward
# pass.
def _match_all_reduce_for_bwd_pass(self):
comm_work = []
# Schedule comm in the same order as Reducer schedules them, i.e.
# the order of the buckets. Retrieving the bucket order from the reducer
# ensures that we keep the same order in join mode, such as when bucket
# order is rebuilt dynamically.
# Returns grad_buckets in order, but real tensors are substituted with
# zero tensors of the same shape.
grad_buckets = self.reducer._get_zeros_like_grad_buckets()
for grad_bucket in grad_buckets:
# Joined processes contribute zero gradient. In the case that
# divide_by_initial_world_size=True, we divide grads by the static
# world size, if not, the dividing factor is reduced by the number
# of joined processes.
work = self.reducer._run_comm_hook(grad_bucket)
comm_work.append(work)
for work in comm_work:
work.wait()
# Allreduces the used parameter mapping across ranks.
def _match_unused_params_allreduce(self):
locally_used_param_map = self.reducer._get_local_used_map()
self.process_group.allreduce(locally_used_param_map)
def join(
self,
divide_by_initial_world_size: bool = True,
enable: bool = True,
throw_on_early_termination: bool = False,
):
r"""
A context manager to be used in conjunction with an instance of
:class:`torch.nn.parallel.DistributedDataParallel` to be
able to train with uneven inputs across participating processes.
This context manager will keep track of already-joined DDP processes,
and "shadow" the forward and backward passes by inserting collective
communication operations to match with the ones created by non-joined
DDP processes. This will ensure each collective call has a corresponding
call by already-joined DDP processes, preventing hangs or errors that
would otherwise happen when training with uneven inputs across
processes. Alternatively, if the flag ``throw_on_early_termination`` is
specified to be ``True``, all trainers will throw an error once one rank
runs out of inputs, allowing these errors to be caught and handled
according to application logic.
Once all DDP processes have joined, the context manager will broadcast
the model corresponding to the last joined process to all processes to
ensure the model is the same across all processes
(which is guaranteed by DDP).
To use this to enable training with uneven inputs across processes,
        simply wrap this context manager around your training loop. No further
        modifications to the model or data loading are required.
.. warning::
If the model or training loop this context manager is wrapped around
has additional distributed collective operations, such as
``SyncBatchNorm`` in the model's forward pass, then the flag
``throw_on_early_termination`` must be enabled. This is because this
context manager is not aware of non-DDP collective communication.
This flag will cause all ranks to throw when any one rank
exhausts inputs, allowing these errors to be caught and recovered
from across all ranks.
Args:
divide_by_initial_world_size (bool): If ``True``, will divide
gradients by the initial ``world_size`` DDP training was launched
with. If ``False``, will compute the effective world size
(number of ranks that have not depleted their inputs yet) and
divide gradients by that during allreduce. Set
``divide_by_initial_world_size=True`` to ensure every input
                sample, including the uneven inputs, has equal weight in terms of
how much they contribute to the global gradient. This is
achieved by always dividing the gradient by the initial
``world_size`` even when we encounter uneven inputs. If you set
this to ``False``, we divide the gradient by the remaining
number of nodes. This ensures parity with training on a smaller
``world_size`` although it also means the uneven inputs would
contribute more towards the global gradient. Typically, you
would want to set this to ``True`` for cases where the last few
inputs of your training job are uneven. In extreme cases, where
there is a large discrepancy in the number of inputs, setting
this to ``False`` might provide better results.
enable (bool): Whether to enable uneven input detection or not. Pass
in ``enable=False`` to disable in cases where you know that
inputs are even across participating processes. Default is
``True``.
throw_on_early_termination (bool): Whether to throw an error
or continue training when at least one rank has exhausted
inputs. If ``True``, will throw upon the first rank reaching end
of data. If ``False``, will continue training with a smaller
effective world size until all ranks are joined. Note that if
this flag is specified, then the flag
``divide_by_initial_world_size`` would be ignored. Default
is ``False``.
Example::
>>> import torch
>>> import torch.distributed as dist
>>> import os
>>> import torch.multiprocessing as mp
>>> import torch.nn as nn
>>> # On each spawned worker
>>> def worker(rank):
>>> dist.init_process_group("nccl", rank=rank, world_size=2)
>>> torch.cuda.set_device(rank)
>>> model = nn.Linear(1, 1, bias=False).to(rank)
>>> model = torch.nn.parallel.DistributedDataParallel(
>>> model, device_ids=[rank], output_device=rank
>>> )
>>> # Rank 1 gets one more input than rank 0.
>>> inputs = [torch.tensor([1]).float() for _ in range(10 + rank)]
>>> with model.join():
>>> for _ in range(5):
>>> for inp in inputs:
>>> loss = model(inp).sum()
>>> loss.backward()
>>> # Without the join() API, the below synchronization will hang
>>> # blocking for rank 1's allreduce to complete.
>>> torch.cuda.synchronize(device=rank)
"""
return Join(
[self],
enable,
throw_on_early_termination,
divide_by_initial_world_size=divide_by_initial_world_size,
)
def join_hook(
self,
**kwargs,
):
r"""
Returns the DDP join hook, which enables training on uneven inputs by
shadowing the collective communications in the forward and backward
passes.
Arguments:
kwargs (dict): a :class:`dict` containing any keyword arguments
to modify the behavior of the join hook at run time; all
:class:`Joinable` instances sharing the same join context
manager are forwarded the same value for ``kwargs``.
The hook supports the following keyword arguments:
divide_by_initial_world_size (bool, optional):
If ``True``, then gradients are divided by the initial world
size that DDP was launched with.
If ``False``, then gradients are divided by the effective world
size (i.e. the number of non-joined processes), meaning that
the uneven inputs contribute more toward the global gradient.
Typically, this should be set to ``True`` if the degree of
unevenness is small but can be set to ``False`` in extreme
cases for possibly better results.
Default is ``True``.
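        Example (a minimal sketch; ``ddp_model`` and ``inputs`` are assumed to
        exist, and the hook is consumed by the generic join context manager,
        which forwards ``divide_by_initial_world_size`` to it)::
            >>> # xdoctest: +SKIP("undefined variables")
            >>> from torch.distributed.algorithms.join import Join
            >>> with Join([ddp_model], divide_by_initial_world_size=False):
            >>>     for inp in inputs:
            >>>         ddp_model(inp).sum().backward()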
"""
divide_by_initial_world_size = kwargs.get("divide_by_initial_world_size", True)
return _DDPJoinHook(
self, divide_by_initial_world_size=divide_by_initial_world_size
)
@property
def join_device(self):
return self.device
@property
def join_process_group(self):
return self.process_group
def _register_buffer_comm_hook(
self,
state,
hook: callable,
comm_hook_location=_BufferCommHookLocation.POST_FORWARD
):
r"""
        Allows custom registration of hooks that define how buffers are
        synchronized across ranks. The hook takes in an optional state
        and is passed a Dict[str, Tensor] corresponding to buffer names
and the buffers, and can run arbitrary reductions on buffers as
opposed to DDP's default broadcast from rank 0. This is useful for
example if a counter needs to be summed or averaged across ranks
every iteration.
Args:
state (Any): Optional state that is passed to the hook.
hook (Callable): Callable with the following signature:
``hook(state: object, buffers: Dict[str, torch.Tensor])
-> Optional[List[torch.futures.Future[torch.Tensor]]]``
comm_hook_location (_BufferCommHookLocation): Enum value indicating
where to run the hook.
_BufferCommHookLocation.PRE_FORWARD means that the
hook will run _before_ the forward pass, and
_BufferCommHookLocation.POST_FORWARD means that the
hook will run _after_ the forward pass.
NOTE: To maximize performance, users can return a
List[torch.futures.Future] from their hook, and DDP will
install and await these hooks appropriately at the end of
the backward pass. This will ensure all buffers are
synchronized by the end of the backward pass. If this
setting is used, it is recommended to pass
comm_hook_location=_BufferCommHookLocation.POST_FORWARD,
which will trigger the hook after the forward pass.
If _BufferCommHookLocation.PRE_FORWARD is used, users must
ensure appropriate synchronization when manipulating GPU
buffers in the forward pass.
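        Example (a minimal sketch; ``ddp`` is assumed to be an initialized
        ``DistributedDataParallel`` instance using the default process group)::
            >>> # xdoctest: +SKIP("undefined variables")
            >>> # Average (rather than broadcast) buffers across ranks.
            >>> def allreduce_avg_buffers(state, buffers):
            >>>     futs = []
            >>>     for name, buf in buffers.items():
            >>>         buf.div_(torch.distributed.get_world_size())
            >>>         work = torch.distributed.all_reduce(buf, async_op=True)
            >>>         futs.append(work.get_future())
            >>>     return futs
            >>> ddp._register_buffer_comm_hook(state=None, hook=allreduce_avg_buffers)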
"""
assert callable(hook)
self.buffer_hook = _BufferCommHook(
buffer_comm_hook=hook,
buffer_comm_hook_state=state,
buffer_comm_hook_location=comm_hook_location
)
def register_comm_hook(self, state: object, hook: callable):
r"""
Registers a communication hook which is an enhancement that provides a
flexible hook to users where they can specify how DDP aggregates gradients
across multiple workers.
This hook would be very useful for researchers to try out new ideas. For
example, this hook can be used to implement several algorithms like GossipGrad
and gradient compression which involve different communication strategies for
parameter syncs while running Distributed DataParallel training.
Args:
state (object): Passed to the hook to maintain any state information during the training process.
Examples include error feedback in gradient compression,
peers to communicate with next in GossipGrad, etc.
It is locally stored by each worker
and shared by all the gradient tensors on the worker.
hook (Callable): Callable with the following signature:
``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]``:
This function is called once the bucket is ready. The
hook can perform whatever processing is needed and return
a Future indicating completion of any async work (ex: allreduce).
If the hook doesn't perform any communication, it still
must return a completed Future. The Future should hold the
new value of grad bucket's tensors. Once a bucket is ready,
c10d reducer would call this hook and use the tensors returned
by the Future and copy grads to individual parameters.
Note that the future's return type must be a single tensor.
We also provide an API called ``get_future`` to retrieve a
Future associated with the completion of ``c10d.ProcessGroup.Work``.
``get_future`` is currently supported for NCCL and also supported for most
operations on GLOO and MPI, except for peer to peer operations (send/recv).
.. warning ::
            Grad bucket's tensors will not be predivided by world_size. The user is
            responsible for dividing by the world_size in case of operations like allreduce.
.. warning ::
DDP communication hook can only be registered once and should be registered
before calling backward.
.. warning ::
The Future object that hook returns should contain a single tensor
that has the same shape with the tensors inside grad bucket.
.. warning ::
``get_future`` API supports NCCL, and partially GLOO and MPI backends (no support
for peer-to-peer operations like send/recv) and will return a ``torch.futures.Future``.
Example::
Below is an example of a noop hook that returns the same tensor.
>>> def noop(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:
>>> fut = torch.futures.Future()
>>> fut.set_result(bucket.buffer())
>>> return fut
>>> # xdoctest: +SKIP('undefined name')
>>> ddp.register_comm_hook(state=None, hook=noop)
Example::
Below is an example of a Parallel SGD algorithm where gradients are encoded before
allreduce, and then decoded after allreduce.
>>> def encode_and_decode(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:
>>> encoded_tensor = encode(bucket.buffer()) # encode gradients
            >>>     fut = torch.distributed.all_reduce(encoded_tensor, async_op=True).get_future()
            >>>     # Define the then callback to decode.
            >>>     def decode_callback(fut):
            >>>         decoded_tensor = decode(fut.value()[0]) # decode gradients
            >>>         return decoded_tensor
            >>>     return fut.then(decode_callback)
>>> # xdoctest: +SKIP('undefined name')
>>> ddp.register_comm_hook(state=None, hook=encode_and_decode)
"""
self._check_comm_hook(hook)
self.logger._set_comm_hook_name(hook.__qualname__)
dist._register_comm_hook(self.reducer, state, hook)
def _register_builtin_comm_hook(self, comm_hook_type):
r"""
Registers a built-in communication hook that specifies how DDP
aggregates gradients across multiple workers.
The built-in hooks aim to provide efficient C++ implementations for certain hooks,
which might not be as efficient if implemented in Python using a Python communication hook.
Args:
comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as ALLREDUCE, FP16_COMPRESS, etc.
.. warning ::
DDP communication hook can only be registered once and should be registered
before calling backward.
Example::
Below is an example of a FP16 compression where gradients are
compressed into 16-bit floating-point numbers before allreduce, and
then decompressed after allreduce.
>>> # xdoctest: +SKIP('undefined name')
>>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)
"""
self.logger._set_comm_hook_name(str(comm_hook_type))
dist._register_builtin_comm_hook(self.reducer, comm_hook_type)
def _register_fused_optim(self, optim: Type, *args, optim_params=None, **kwargs):
r"""
Registers an optimizer with DDP such that the optimization for a
parameter will run immediately when that parameter's gradient is
finished with reduction, instead of waiting for all parameters'
gradients to finish reduction. This can result in a training speedup
depending on your workload since the optimizer can run while gradient
reduction for other parameters are still ongoing. In addition, this has
the potential to reduce peak memory consumption during training, as it
only needs to load the per-parameter optimizer states of a single
parameter at a time, instead of loading all per-parameter optimizer
states at once.
Args:
optim_cls (Type): a ``torch.optim.Optimizer`` class to be registered
as a fused optimizer.
*args (Sequence[Any]): Arguments to forward to `optim_cls`.
optim_params (Optional[Iterable[torch.Tensor]]): Set of parameters
to optimize, similar to `params` argument of traditional `torch.optim`
Optimizers. If this is omitted, all DDP model parameters will be
optimized.
**kwargs: (Dict[str, Any]): Keyword arguments to forward to `optim_cls`.
.. warning ::
_register_fused_optim should only be called once on a DDP instance,
and registering multiple fused optimizers for the same DDP model
is not currently supported. Please ping
https://github.com/pytorch/pytorch/issues/71595 if this is necessary
for your use case.
.. warning ::
_register_fused_optim and register_comm_hook currently do not
compose together, meaning that custom DDP communication hooks are
not supported with overlapped optimizers. Please ping
https://github.com/pytorch/pytorch/issues/71595 if this is necessary
for your use case.
.. warning ::
Gradient accumulation and DDP `no_sync` are currently not supported
with overlapped optimizer. Please ping
https://github.com/pytorch/pytorch/issues/71595 if this is necessary
for your use case.
Example::
>>> # xdoctest: +SKIP("No rendezvous handler")
>>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
>>> net = torch.nn.parallel.DistributedDataParallel(model, pg)
>>> lr = 1e-2
>>> betas = (0.9, 0.99)
>>> eps = 1e-6
>>> net._register_fused_optim(torch.optim.Adam, lr, betas=betas, eps=eps)
>>> # Example with subset of parameters
>>> params_to_opt = [list(net.parameters())[0]]
>>> net._register_fused_optim(
... torch.optim.Adam, lr, optim_params=params_to_opt, betas=betas, eps=eps
... )
"""
# Note: importing in function, otherwise this will cause a circular
# import as optimizer_overlap module needs to import DistributedDataParallel.
from torch.distributed.algorithms._optimizer_overlap import _as_overlapped_optim
overlapped_optim = _as_overlapped_optim(optim, optim_params, *args, **kwargs)
try:
overlapped_optim.register_ddp(self)
except NotImplementedError:
raise RuntimeError(
f"{optim} does not support overlapped DDP. Please file an issue to PyTorch or the respective owner of {optim}."
)
def _distributed_broadcast_coalesced(
self, tensors, buffer_size, authoritative_rank=0
):
dist._broadcast_coalesced(
self.process_group, tensors, buffer_size, authoritative_rank
)
def _check_sync_bufs_post_fwd(self):
return (
self.will_sync_module_buffers() and
hasattr(self, 'buffer_hook') and
self.buffer_hook.buffer_comm_hook_location ==
_BufferCommHookLocation.POST_FORWARD
)
def _check_sync_bufs_pre_fwd(self):
return self.will_sync_module_buffers() and (
not hasattr(self, 'buffer_hook') or
self.buffer_hook.buffer_comm_hook_location
== _BufferCommHookLocation.PRE_FORWARD
)
def will_sync_module_buffers(self):
return (
self.require_forward_param_sync
and self.broadcast_buffers
and len(self.modules_buffers) > 0
)
def _find_common_rank(self, input_rank, rank_cond):
# -1 indicates that this rank is not under consideration to be the
# common_rank
rank_to_use = torch.tensor(
[input_rank if rank_cond else -1],
device=self.device,
)
dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group)
if rank_to_use.item() == -1:
self._log_and_throw(
ValueError,
"BUG! Expected rank_cond to be true for at least one process."
" This indicates a bug in PyTorch, please report an issue.",
)
return rank_to_use.item()
def _sync_buffers(self):
with torch.no_grad():
# module buffer sync
# Synchronize buffers across processes.
# If we are running DDP with the join manager, we have to agree
# upon a rank to sync module buffers from, since rank 0 may
# already have been joined and have stale module buffers.
if self._join_config.enable:
authoritative_rank = self._find_common_rank(
self._distributed_rank, True
)
else:
# The process with rank 0 is considered the authoritative copy.
authoritative_rank = 0
            # Update self.modules_buffers in case any buffers were
            # reassigned.
self._assign_modules_buffers()
self._sync_module_buffers(authoritative_rank)
def _sync_module_buffers(self, authoritative_rank):
if not hasattr(self, 'buffer_hook'):
self._default_broadcast_coalesced(authoritative_rank=authoritative_rank)
else:
hook = self.buffer_hook.buffer_comm_hook
state = self.buffer_hook.buffer_comm_hook_state
futs = hook(state, self.named_module_buffers)
if futs is not None:
self.reducer._install_post_backward_futures(futs)
def _default_broadcast_coalesced(
self, bufs=None, bucket_size=None, authoritative_rank=0
):
"""
        Broadcasts buffers from rank 0 to the rest of the workers. If bufs, bucket_size
are None, default values self.modules_buffers and
self.broadcast_bucket_size are used instead.
"""
if bufs is None:
bufs = self.modules_buffers
if bucket_size is None:
bucket_size = self.broadcast_bucket_size
self._distributed_broadcast_coalesced(
bufs,
bucket_size,
authoritative_rank
)
def _passing_sync_batchnorm_handle(self, module):
for layer in module.modules():
if isinstance(layer, torch.nn.modules.SyncBatchNorm):
if self.device_type == "cpu":
self._log_and_throw(
ValueError, "SyncBatchNorm layers only work with GPU modules"
)
def _check_comm_hook(self, hook):
if not callable(hook):
self._log_and_throw(TypeError, "Communication hook must be callable.")
sig = inspect.signature(hook)
if (
sig.parameters["bucket"].annotation != inspect._empty
and sig.parameters["bucket"].annotation != dist.GradBucket
):
self._log_and_throw(
ValueError,
"Communication hook: bucket annotation should be dist.GradBucket.",
)
if (
sig.return_annotation != inspect._empty
and sig.return_annotation != torch.futures.Future[torch.Tensor]
):
self._log_and_throw(
ValueError,
"Communication hook: return annotation should be torch.futures.Future[torch.Tensor].",
)
if (
hook.__name__ in ["bf16_compress_hook", "bf16_compress_wrapper_hook"]
and
(
(torch.version.cuda is None and torch.version.hip is None)
or (torch.version.cuda is not None and int(torch.version.cuda.split('.')[0]) < 11)
or not dist.is_available()
or not dist.is_nccl_available()
or torch.cuda.nccl.version() < (2, 10)
)
):
self._log_and_throw(TypeError, "BF16 all reduce communication hook required CUDA 11+ and NCCL 2.10+.")
@property
def _distributed_rank(self):
return dist.get_rank(self.process_group)
@staticmethod
def _set_params_and_buffers_to_ignore_for_model(
module, params_and_buffers_to_ignore
):
"""
Sets parameters and buffers to be ignored by DDP. Expected format for
parameters is the fully qualified name: {module_name}.{param_name}, and
similarly, {module_name}.{buffer_name} for buffers. For example:
params_to_ignore = []
# NB: model here is vanilla PyTorch module, not yet wrapped with DDP.
for module_name, module in model.named_modules():
for param_name, param in module.named_parameters(recurse=False):
if should_ignore(param):
# Create expected format
fqn = f"{module_name}.{param_name}"
params_to_ignore.append(fqn)
torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
model,
params_to_ignore
)
"""
# This is a workaround to set parameters and buffers DDP should ignore
# during synchronization. It will be removed when the API is finalized
# as part of addressing https://github.com/pytorch/pytorch/issues/43690.
module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore
def _get_ddp_logging_data(self):
r"""
        This interface can be called after DistributedDataParallel() is
        constructed. It returns a dictionary of logging data. It could help
        for debugging and analysis. The logging data includes DistributedDataParallel
        constructor input parameters, some internal states of DistributedDataParallel
        and performance metrics. Simply print the dictionary and see what
        these metrics are.
This is a prototype interface and subject to change in the future.
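        Example (``ddp_model`` is assumed to be an already constructed
        ``DistributedDataParallel`` instance)::
            >>> # xdoctest: +SKIP("undefined variables")
            >>> ddp_logging_data = ddp_model._get_ddp_logging_data()
            >>> print(ddp_logging_data.get("can_set_static_graph"))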
"""
ddp_logging_data = self.logger._get_ddp_logging_data()
return {**ddp_logging_data.strs_map, **ddp_logging_data.ints_map}
def _set_ddp_runtime_logging_sample_rate(self, sample_rate):
r"""
        This interface allows users to set the sample_rate of collecting
        runtime stats. The runtime stats will be recorded for the
        first 10 iterations; after 10 iterations runtime stats will be
        recorded once every "sample_rate" training iterations. By
        default, runtime stats are recorded for the first 10 iterations,
        and after 10 iterations runtime stats are recorded once every
        "kDDPRuntimeLoggingSampleRate=100" training iterations.
This is a prototype interface and subject to change in the future.
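        Example (``ddp_model`` is assumed to be an already constructed
        ``DistributedDataParallel`` instance)::
            >>> # xdoctest: +SKIP("undefined variables")
            >>> # Record runtime stats every 20 iterations after the first 10.
            >>> ddp_model._set_ddp_runtime_logging_sample_rate(20)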
"""
if sample_rate < 1:
self._log_and_throw(
ValueError,
"DDP runtime logging sample rate should be equal or greater than 1",
)
self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate)
def _set_static_graph(self):
"""
It is recommended to set static graph in the DDP constructor, which will
call this private API internally.
"""
# If self.static_graph has been set, no need to set it again
if self.static_graph:
warnings.warn(
"You've set static_graph to be True, no need to set it again."
)
return
self.static_graph = True
self.reducer._set_static_graph()
self.logger._set_static_graph()
if self.find_unused_parameters:
warnings.warn(
"You passed find_unused_parameters=true to DistributedDataParallel, "
"`_set_static_graph` will detect unused parameters automatically, so "
"you do not need to set find_unused_parameters=true, just be sure these "
"unused parameters will not change during training loop while calling "
"`_set_static_graph`."
)
|
pytorch-master
|
torch/nn/parallel/distributed.py
|
import torch
from torch.distributed._shard.replicated_tensor import ReplicatedTensor
class ReplicatedTensorFunction(torch.autograd.Function):
"""
Autograd function to ensure gradients are replicated between the
replicated tensor and the original one.
"""
@staticmethod
def forward(ctx, inp, process_group=None):
# set_materialize_grads(False) will ensure that None gradients stay as
# None and are not filled with zeros.
ctx.set_materialize_grads(False)
return ReplicatedTensor(inp, process_group)
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
def _make_replicated_tensor(tensor, process_group):
replicated_tensor = ReplicatedTensorFunction.apply(tensor, process_group)
replicated_tensor.grad = tensor.grad
return replicated_tensor
def _replicate_module_recurse(module, process_group):
replica = module._replicate_for_data_parallel()
for param_name, param in module._parameters.items():
if param is not None:
setattr(replica, param_name, _make_replicated_tensor(param, process_group))
else:
setattr(replica, param_name, param)
for buffer_name, buffer in module._buffers.items():
setattr(replica, buffer_name, buffer)
for module_name, child in module._modules.items():
setattr(replica, module_name, _replicate_module_recurse(child, process_group))
return replica
def _replicate_module(network, process_group):
from torch.nn.parallel.replicate import _replicatable_module # type: ignore[attr-defined]
if not _replicatable_module(network):
raise RuntimeError("Cannot replicate network where python modules are "
"childrens of ScriptModule")
return _replicate_module_recurse(network, process_group)
|
pytorch-master
|
torch/nn/parallel/_replicated_tensor_ddp_interop.py
|
from .modules import * # noqa: F403
|
pytorch-master
|
torch/nn/qat/__init__.py
|
from .modules import * # noqa: F403
|
pytorch-master
|
torch/nn/qat/dynamic/__init__.py
|
import torch
from torch.ao.quantization import activation_is_memoryless
class Linear(torch.nn.qat.Linear):
r"""
A linear module attached with FakeQuantize modules for weight,
used for dynamic quantization aware training.
    We adopt the same interface as `torch.nn.Linear`; please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
for documentation.
Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to
default.
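    Example (a minimal sketch; ``memoryless_qat_qconfig`` is a placeholder for
    a qconfig whose activation observer is memoryless)::
        >>> # xdoctest: +SKIP("undefined variables")
        >>> import torch
        >>> qat_dyn_linear = torch.nn.qat.dynamic.Linear(
        >>>     16, 8, qconfig=memoryless_qat_qconfig
        >>> )
        >>> out = qat_dyn_linear(torch.randn(4, 16))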
"""
def __init__(self, in_features, out_features, bias=True,
qconfig=None, device=None, dtype=None) -> None:
super().__init__(in_features, out_features, bias, qconfig, device, dtype)
if not activation_is_memoryless(qconfig):
raise ValueError(
"Dynamic QAT requires a memoryless observer." +
"This means a MovingAverage observer with averaging constant equal to 1"
)
|
pytorch-master
|
torch/nn/qat/dynamic/modules/linear.py
|
from .linear import Linear
__all__ = ["Linear"]
|
pytorch-master
|
torch/nn/qat/dynamic/modules/__init__.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.intrinsic import LinearReLU
from torch.nn.utils.parametrize import (
is_parametrized,
type_before_parametrizations,
transfer_parametrizations_and_params,
)
class Linear(nn.Linear):
r"""
A linear module attached with FakeQuantize modules for weight,
used for quantization aware training.
We adopt the same interface as `torch.nn.Linear`, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
for documentation.
Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to
default.
Attributes:
        weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nn.Linear
def __init__(self, in_features, out_features, bias=True,
qconfig=None, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(in_features, out_features, bias, **factory_kwargs)
assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
def forward(self, input):
return F.linear(input, self.weight_fake_quant(self.weight), self.bias)
@classmethod
def from_float(cls, mod):
r"""Create a qat module from a float module or qparams_dict
Args: `mod` a float module, either produced by torch.ao.quantization utilities
or directly from user
"""
assert type_before_parametrizations(mod) == cls._FLOAT_MODULE, (
" qat."
+ cls.__name__
+ ".from_float only works for "
+ cls._FLOAT_MODULE.__name__
)
assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
assert mod.qconfig, "Input float module must have a valid qconfig"
if type_before_parametrizations(mod) == LinearReLU:
mod = mod[0]
qconfig = mod.qconfig
qat_linear = cls(mod.in_features, mod.out_features, bias=mod.bias is not None, qconfig=qconfig)
if is_parametrized(mod, "weight"):
transfer_parametrizations_and_params(mod, qat_linear, "weight")
else:
qat_linear.weight = mod.weight
if is_parametrized(mod, "bias"):
transfer_parametrizations_and_params(mod, qat_linear, "bias")
else:
qat_linear.bias = mod.bias
return qat_linear
def to_float(self):
linear = torch.nn.Linear(self.in_features, self.out_features, self.bias is not None)
linear.weight = torch.nn.Parameter(self.weight.detach())
if self.bias is not None:
linear.bias = torch.nn.Parameter(self.bias.detach())
linear.train(self.training)
return linear
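# Illustrative usage sketch added for this document (the helper name below is
# ours, not part of the original torch/nn/qat/modules/linear.py): convert a
# float nn.Linear into a QAT Linear, run a fake-quantized forward pass, and
# convert back to float.
def _example_qat_linear_usage():
    from torch.ao.quantization import get_default_qat_qconfig

    float_linear = nn.Linear(4, 2)
    float_linear.qconfig = get_default_qat_qconfig("fbgemm")  # FakeQuantize-based qconfig
    qat_linear = Linear.from_float(float_linear)  # weight is fake-quantized in forward
    out = qat_linear(torch.randn(3, 4))
    restored = qat_linear.to_float()  # plain nn.Linear carrying the current weights
    return out, restored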
|
pytorch-master
|
torch/nn/qat/modules/linear.py
|
from .linear import Linear
from .conv import Conv1d
from .conv import Conv2d
from .conv import Conv3d
from .embedding_ops import EmbeddingBag, Embedding
__all__ = [
"Linear",
"Conv1d",
"Conv2d",
"Conv3d",
"Embedding",
"EmbeddingBag",
]
|
pytorch-master
|
torch/nn/qat/modules/__init__.py
|
import torch
import torch.nn as nn
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.intrinsic import _FusedModule
from typing import Tuple, TypeVar, Union
from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)
class _ConvNd(nn.modules.conv._ConvNd):
_FLOAT_MODULE = MOD
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: Tuple[int, ...],
stride: Tuple[int, ...],
padding: Tuple[int, ...],
dilation: Tuple[int, ...],
transposed: bool,
output_padding: Tuple[int, ...],
groups: int,
bias: bool,
padding_mode: str,
qconfig=None,
device=None,
dtype=None) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size,
stride, padding, dilation, transposed,
output_padding, groups, bias, padding_mode, **factory_kwargs)
assert qconfig, 'qconfig must be provided for QAT module'
self.qconfig = qconfig
self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
def forward(self, input):
return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
@staticmethod
def from_float(cls, mod):
r"""Create a qat module from a float module
Args:
`mod`: a float module, either produced by torch.ao.quantization utilities
or directly from user
"""
assert type(mod) == cls._FLOAT_MODULE, (
"qat."
+ cls.__name__
+ ".from_float only works for "
+ cls._FLOAT_MODULE.__name__ # type: ignore[attr-defined]
)
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
assert mod.qconfig, 'Input float module must have a valid qconfig'
if issubclass(type(mod), _FusedModule):
mod = mod[0] # type: ignore[index]
qconfig = mod.qconfig
qat_conv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
stride=mod.stride, padding=mod.padding, dilation=mod.dilation,
groups=mod.groups, bias=mod.bias is not None,
padding_mode=mod.padding_mode, qconfig=qconfig)
qat_conv.weight = mod.weight
qat_conv.bias = mod.bias
return qat_conv
def to_float(self):
""" This works for both single qat conv, and the qat conv - relu modules
to convert the qat module to a floating point module
"""
cls = type(self)
conv = cls._FLOAT_CONV_MODULE( # type: ignore[attr-defined, operator]
self.in_channels,
self.out_channels,
self.kernel_size, # type: ignore[arg-type]
self.stride, # type: ignore[arg-type]
self.padding, # type: ignore[arg-type]
self.dilation, # type: ignore[arg-type]
self.groups,
self.bias is not None,
self.padding_mode)
conv.weight = torch.nn.Parameter(self.weight.detach())
if self.bias is not None:
conv.bias = torch.nn.Parameter(self.bias.detach())
# conv relu
if issubclass(cls, _FusedModule):
modules = [conv]
assert hasattr(cls, "_FLOAT_RELU_MODULE")
relu = cls._FLOAT_RELU_MODULE() # type: ignore[attr-defined]
modules.append(relu)
fused = cls._FLOAT_MODULE(*modules) # type: ignore[arg-type, attr-defined, operator]
fused.train(self.training)
return fused
else:
return conv
class Conv1d(_ConvNd, nn.Conv1d):
r"""
A Conv1d module attached with FakeQuantize modules for weight,
used for quantization aware training.
We adopt the same interface as :class:`~torch.nn.Conv1d`
    Similar to :class:`~torch.nn.Conv1d`, with FakeQuantize modules initialized to
default.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nn.Conv1d
_FLOAT_CONV_MODULE = nn.Conv1d
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: Union[str, _size_1_t] = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
qconfig=None,
device=None,
dtype=None) -> None:
kernel_size_ = _single(kernel_size)
stride_ = _single(stride)
padding_ = padding if isinstance(padding, str) else _single(padding)
dilation_ = _single(dilation)
super().__init__(
in_channels,
out_channels,
kernel_size_,
stride=stride_,
padding=padding_,
dilation=dilation_,
transposed=False,
output_padding=_single(0),
groups=groups,
bias=bias,
padding_mode=padding_mode,
qconfig=qconfig,
device=device,
dtype=dtype)
@classmethod
def from_float(cls, mod):
return super().from_float(cls, mod)
class Conv2d(_ConvNd, nn.Conv2d):
r"""
A Conv2d module attached with FakeQuantize modules for weight,
used for quantization aware training.
We adopt the same interface as `torch.nn.Conv2d`, please see
https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d
for documentation.
Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
default.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nn.Conv2d
_FLOAT_CONV_MODULE = nn.Conv2d
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: Union[str, _size_2_t] = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
qconfig=None,
device=None,
dtype=None) -> None:
kernel_size_ = _pair(kernel_size)
stride_ = _pair(stride)
padding_ = padding if isinstance(padding, str) else _pair(padding)
dilation_ = _pair(dilation)
super().__init__(
in_channels,
out_channels,
kernel_size_,
stride=stride_,
padding=padding_,
dilation=dilation_,
transposed=False,
output_padding=_pair(0),
groups=groups,
bias=bias,
padding_mode=padding_mode,
qconfig=qconfig,
device=device,
dtype=dtype)
def forward(self, input):
return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
@classmethod
def from_float(cls, mod):
return super().from_float(cls, mod)
class Conv3d(_ConvNd, nn.Conv3d):
r"""
A Conv3d module attached with FakeQuantize modules for weight,
used for quantization aware training.
We adopt the same interface as `torch.nn.Conv3d`, please see
https://pytorch.org/docs/stable/nn.html?highlight=conv3d#torch.nn.Conv3d
for documentation.
Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
default.
Attributes:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nn.Conv3d
_FLOAT_CONV_MODULE = nn.Conv3d
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: _size_3_t,
stride: _size_3_t = 1,
padding: Union[str, _size_3_t] = 0,
dilation: _size_3_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
qconfig=None,
device=None,
dtype=None) -> None:
kernel_size_ = _triple(kernel_size)
stride_ = _triple(stride)
padding_ = padding if isinstance(padding, str) else _triple(padding)
dilation_ = _triple(dilation)
super().__init__(
in_channels,
out_channels,
kernel_size_,
stride=stride_,
padding=padding_,
dilation=dilation_,
transposed=False,
output_padding=_triple(0),
groups=groups,
bias=bias,
padding_mode=padding_mode,
qconfig=qconfig,
device=device,
dtype=dtype)
def forward(self, input):
return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)
@classmethod
def from_float(cls, mod):
return super().from_float(cls, mod)
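# Illustrative usage sketch added for this document (the helper name below is
# ours, not part of the original torch/nn/qat/modules/conv.py): convert a float
# nn.Conv2d into its QAT counterpart and run a fake-quantized forward pass.
def _example_qat_conv2d_usage():
    from torch.ao.quantization import get_default_qat_qconfig

    float_conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
    float_conv.qconfig = get_default_qat_qconfig("fbgemm")
    qat_conv = Conv2d.from_float(float_conv)  # weight is fake-quantized in forward
    out = qat_conv(torch.randn(1, 3, 16, 16))
    return out, qat_conv.to_float()  # back to a plain nn.Conv2d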
|
pytorch-master
|
torch/nn/qat/modules/conv.py
|
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['Embedding', 'EmbeddingBag']
class Embedding(nn.Embedding):
r"""
    An embedding module attached with FakeQuantize modules for weight,
used for quantization aware training.
We adopt the same interface as `torch.nn.Embedding`, please see
https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html#torch.nn.Embedding
for documentation.
Similar to `torch.nn.Embedding`, with FakeQuantize modules initialized to
default.
Attributes:
        weight_fake_quant: fake quant module for weight
"""
_FLOAT_MODULE = nn.Embedding
def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
max_norm=None, norm_type=2.0, scale_grad_by_freq=False,
sparse=False, _weight=None, device=None, dtype=None, qconfig=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(num_embeddings, embedding_dim, padding_idx, max_norm,
norm_type, scale_grad_by_freq, sparse, _weight,
**factory_kwargs)
assert qconfig, 'qconfig must be provided for QAT module'
assert qconfig.weight().qscheme == torch.per_channel_affine_float_qparams, \
            'Embedding weights require a qscheme of torch.per_channel_affine_float_qparams. Got ' + \
str(qconfig.weight().qscheme)
self.qconfig = qconfig
self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
def forward(self, input) -> Tensor:
return F.embedding(input, self.weight_fake_quant(self.weight), self.padding_idx,
self.max_norm, self.norm_type, self.scale_grad_by_freq,
self.sparse)
@classmethod
def from_float(cls, mod):
r"""Create a qat module from a float module
Args: `mod` a float module, either produced by torch.ao.quantization utilities
or directly from user
"""
assert type(mod) == cls._FLOAT_MODULE, ' qat.' + cls.__name__ + '.from_float only works for ' + \
cls._FLOAT_MODULE.__name__
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
assert mod.qconfig, 'Input float module must have a valid qconfig'
weight_qscheme = mod.qconfig.weight().qscheme # type: ignore[union-attr, operator]
assert weight_qscheme == torch.per_channel_affine_float_qparams, \
            'Embedding weights require a qscheme of torch.per_channel_affine_float_qparams. Got ' + \
str(weight_qscheme)
qconfig = mod.qconfig
        qat_embedding = cls(mod.num_embeddings, mod.embedding_dim, mod.padding_idx,
                            mod.max_norm, mod.norm_type, mod.scale_grad_by_freq,
                            mod.sparse, mod.weight, qconfig=qconfig)
        return qat_embedding
    def to_float(self):
        embedding = torch.nn.Embedding(self.num_embeddings, self.embedding_dim, self.padding_idx,
                                       self.max_norm, self.norm_type, self.scale_grad_by_freq,
                                       self.sparse, None)
        embedding.weight = torch.nn.Parameter(self.weight.detach())
        embedding.train(self.training)
        return embedding
class EmbeddingBag(nn.EmbeddingBag):
r"""
An embedding bag module attached with FakeQuantize modules for weight,
used for quantization aware training.
We adopt the same interface as `torch.nn.EmbeddingBag`, please see
https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html#torch.nn.EmbeddingBag
for documentation.
Similar to `torch.nn.EmbeddingBag`, with FakeQuantize modules initialized to
default.
Attributes:
weight: fake quant module for weight
"""
_FLOAT_MODULE = nn.EmbeddingBag
def __init__(self, num_embeddings, embedding_dim, max_norm=None,
norm_type=2.0, scale_grad_by_freq=False, mode='mean',
sparse=False, _weight=None, include_last_offset=False,
padding_idx=None, qconfig=None, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(num_embeddings, embedding_dim, max_norm, norm_type,
scale_grad_by_freq, mode, sparse, _weight,
include_last_offset, padding_idx, **factory_kwargs)
assert qconfig, 'qconfig must be provided for QAT module'
assert qconfig.weight().qscheme == torch.per_channel_affine_float_qparams, \
            'EmbeddingBag weights require a qscheme of torch.per_channel_affine_float_qparams. Got ' + \
str(qconfig.weight().qscheme)
self.qconfig = qconfig
self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
def forward(self, input, offsets=None, per_sample_weights=None) -> Tensor:
return F.embedding_bag(input, self.weight_fake_quant(self.weight), offsets,
self.max_norm, self.norm_type,
self.scale_grad_by_freq, self.mode, self.sparse,
per_sample_weights, self.include_last_offset,
self.padding_idx)
@classmethod
def from_float(cls, mod):
r"""Create a qat module from a float module
Args: `mod` a float module, either produced by torch.ao.quantization utilities
or directly from user
"""
assert type(mod) == cls._FLOAT_MODULE, ' qat.' + cls.__name__ + '.from_float only works for ' + \
cls._FLOAT_MODULE.__name__
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
assert mod.qconfig, 'Input float module must have a valid qconfig'
weight_qscheme = mod.qconfig.weight().qscheme # type: ignore[union-attr, operator]
assert weight_qscheme == torch.per_channel_affine_float_qparams, \
            'EmbeddingBag weights require a qscheme of torch.per_channel_affine_float_qparams. Got ' + \
str(weight_qscheme)
qconfig = mod.qconfig
qat_embedding_bag = cls(mod.num_embeddings, mod.embedding_dim, mod.max_norm, mod.norm_type,
mod.scale_grad_by_freq, mod.mode, mod.sparse, mod.weight,
mod.include_last_offset, mod.padding_idx, qconfig=qconfig)
return qat_embedding_bag
def to_float(self):
embedding_bag = torch.nn.EmbeddingBag(self.num_embeddings, self.embedding_dim, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.mode, self.sparse,
None, self.include_last_offset, self.padding_idx)
embedding_bag.weight = torch.nn.Parameter(self.weight.detach())
embedding_bag.train(self.training)
return embedding_bag
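# Illustrative usage sketch added for this document (the helper name below is
# ours, not part of the original torch/nn/qat/modules/embedding_ops.py). It
# assumes `default_embedding_qat_qconfig` is exposed by torch.ao.quantization,
# which supplies the per_channel_affine_float_qparams weight fake-quant these
# modules require.
def _example_qat_embedding_usage():
    from torch.ao.quantization import default_embedding_qat_qconfig

    float_emb = nn.Embedding(10, 4)
    float_emb.qconfig = default_embedding_qat_qconfig
    qat_emb = Embedding.from_float(float_emb)  # weight is fake-quantized in forward
    out = qat_emb(torch.tensor([[1, 2, 3]]))
    return out, qat_emb.to_float()  # back to a plain nn.Embedding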
|
pytorch-master
|
torch/nn/qat/modules/embedding_ops.py
|
from .modules import * # noqa: F403
|
pytorch-master
|
torch/nn/quantized/__init__.py
|
r""" Functional interface (quantized)."""
from typing import List, Optional
import warnings
import torch
from torch import Tensor
from torch.nn.modules.utils import _pair, _triple
from torch.nn.quantized.modules.utils import _pair_from_first
from torch.jit.annotations import BroadcastingList2
# Although some of the functions and docstrings are mirrored from the torch.nn,
# we want to have them here for future changes.
def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
r"""
Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size
:math:`sH \times sW` steps. The number of output features is equal to the number of
input planes.
.. note:: The input quantization parameters propagate to the output.
See :class:`~torch.nn.quantized.AvgPool2d` for details and output shape.
Args:
input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sH, sW)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padH, padW)`. Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` in the formula
to compute the output shape. Default: ``False``
count_include_pad: when True, will include the zero-padding in the
averaging calculation. Default: ``True``
divisor_override: if specified, it will be used as divisor, otherwise
size of the pooling region will be used. Default: None
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.avg_pool2d' must be quantized!")
return torch.nn.functional.avg_pool2d(input, kernel_size, stride, padding,
ceil_mode, count_include_pad,
divisor_override)
def avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None):
r"""
    Applies 3D average-pooling operation in :math:`kD \times kH \times kW` regions by step size
:math:`sD \times sH \times sW` steps. The number of output features is equal to the number of
input planes.
.. note:: The input quantization parameters propagate to the output.
Args:
        input: quantized input tensor :math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
kernel_size: size of the pooling region. Can be a single number or a
tuple `(kD, kH, kW)`
stride: stride of the pooling operation. Can be a single number or a
tuple `(sD, sH, sW)`. Default: :attr:`kernel_size`
padding: implicit zero paddings on both sides of the input. Can be a
single number or a tuple `(padD, padH, padW)`. Default: 0
ceil_mode: when True, will use `ceil` instead of `floor` in the formula
to compute the output shape. Default: ``False``
count_include_pad: when True, will include the zero-padding in the
averaging calculation. Default: ``True``
divisor_override: if specified, it will be used as divisor, otherwise
size of the pooling region will be used. Default: None
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.avg_pool3d' must be quantized!")
return torch.nn.functional.avg_pool3d(input, kernel_size, stride, padding,
ceil_mode, count_include_pad,
divisor_override)
def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
r"""
Applies a 2D adaptive average pooling over a quantized input signal composed
of several quantized input planes.
.. note:: The input quantization parameters propagate to the output.
See :class:`~torch.nn.quantized.AdaptiveAvgPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or
double-integer tuple)
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.functional.adaptive_avg_pool2d' must be quantized!")
return torch.nn.functional.adaptive_avg_pool2d(input, output_size)
def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
r"""
Applies a 3D adaptive average pooling over a quantized input signal composed
of several quantized input planes.
.. note:: The input quantization parameters propagate to the output.
See :class:`~torch.nn.quantized.AdaptiveAvgPool3d` for details and output shape.
Args:
output_size: the target output size (single integer or
double-integer tuple)
"""
if not input.is_quantized:
raise ValueError(
"Input to 'quantized.functional.adaptive_avg_pool3d' must be quantized!")
return torch.nn.functional.adaptive_avg_pool3d(input, output_size)
def conv1d(input, weight, bias,
stride=1, padding=0, dilation=1, groups=1,
padding_mode='zeros',
scale=1.0, zero_point=0,
dtype=torch.quint8):
r"""
Applies a 1D convolution over a quantized 1D input composed of several input
planes.
See :class:`~torch.nn.quantized.Conv1d` for details and output shape.
Args:
input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`
weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , iW)`
bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sW,)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a
single number or a tuple `(padW,)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dW,)`. Default: 1
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
number of groups. Default: 1
padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
scale: quantization scale for the output. Default: 1.0
zero_point: quantization zero_point for the output. Default: 0
dtype: quantization data type to use. Default: ``torch.quint8``
Examples::
>>> from torch.nn.quantized import functional as qF
>>> filters = torch.randn(33, 16, 3, dtype=torch.float)
>>> inputs = torch.randn(20, 16, 50, dtype=torch.float)
>>> bias = torch.randn(33, dtype=torch.float)
>>>
>>> scale, zero_point = 1.0, 0
>>> dtype_inputs = torch.quint8
>>> dtype_filters = torch.qint8
>>>
>>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
>>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
>>> qF.conv1d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
""" # noqa: E501
if padding_mode != 'zeros':
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 3:
raise ValueError("Input shape must be `(N, C, L)`!")
stride = _pair_from_first(stride)
padding = _pair_from_first(padding)
dilation = _pair_from_first(dilation)
packed_params = torch.ops.quantized.conv1d_prepack(
weight, bias, stride, padding, dilation, groups)
return torch.ops.quantized.conv1d(input, packed_params, scale, zero_point)
def conv2d(input, weight, bias,
stride=1, padding=0, dilation=1, groups=1,
padding_mode='zeros',
scale=1.0, zero_point=0,
dtype=torch.quint8):
r"""
Applies a 2D convolution over a quantized 2D input composed of several input
planes.
See :class:`~torch.nn.quantized.Conv2d` for details and output shape.
Args:
input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`
weight: quantized filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)`
bias: **non-quantized** bias tensor of shape :math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sH, sW)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a
single number or a tuple `(padH, padW)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dH, dW)`. Default: 1
groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the
number of groups. Default: 1
padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
scale: quantization scale for the output. Default: 1.0
zero_point: quantization zero_point for the output. Default: 0
dtype: quantization data type to use. Default: ``torch.quint8``
Examples::
>>> from torch.nn.quantized import functional as qF
>>> filters = torch.randn(8, 4, 3, 3, dtype=torch.float)
>>> inputs = torch.randn(1, 4, 5, 5, dtype=torch.float)
>>> bias = torch.randn(8, dtype=torch.float)
>>>
>>> scale, zero_point = 1.0, 0
>>> dtype_inputs = torch.quint8
>>> dtype_filters = torch.qint8
>>>
>>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
>>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
>>> qF.conv2d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
""" # noqa: E501
if padding_mode != 'zeros':
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
packed_params = torch.ops.quantized.conv2d_prepack(
weight, bias, stride, padding, dilation, groups)
return torch.ops.quantized.conv2d(input, packed_params, scale, zero_point)
def conv3d(input, weight, bias, stride=1, padding=0, dilation=1, groups=1,
padding_mode='zeros', scale=1.0, zero_point=0, dtype=torch.quint8):
r"""
Applies a 3D convolution over a quantized 3D input composed of several input
planes.
See :class:`~torch.nn.quantized.Conv3d` for details and output shape.
Args:
input: quantized input tensor of shape
:math:`(\text{minibatch} , \text{in\_channels} , iD , iH , iW)`
weight: quantized filters of shape
:math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kD , kH , kW)`
bias: **non-quantized** bias tensor of shape
:math:`(\text{out\_channels})`. The tensor type must be `torch.float`.
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sD, sH, sW)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a
single number or a tuple `(padD, padH, padW)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dD, dH, dW)`. Default: 1
groups: split input into groups, :math:`\text{in\_channels}` should be
divisible by the number of groups. Default: 1
padding_mode: the padding mode to use. Only "zeros" is supported for
quantized convolution at the moment. Default: "zeros"
scale: quantization scale for the output. Default: 1.0
zero_point: quantization zero_point for the output. Default: 0
dtype: quantization data type to use. Default: ``torch.quint8``
Examples::
>>> from torch.nn.quantized import functional as qF
>>> filters = torch.randn(8, 4, 3, 3, 3, dtype=torch.float)
>>> inputs = torch.randn(1, 4, 5, 5, 5, dtype=torch.float)
>>> bias = torch.randn(8, dtype=torch.float)
>>>
>>> scale, zero_point = 1.0, 0
>>> dtype_inputs = torch.quint8
>>> dtype_filters = torch.qint8
>>>
>>> q_filters = torch.quantize_per_tensor(filters, scale, zero_point, dtype_filters)
>>> q_inputs = torch.quantize_per_tensor(inputs, scale, zero_point, dtype_inputs)
>>> qF.conv3d(q_inputs, q_filters, bias, padding=1, scale=scale, zero_point=zero_point)
""" # noqa: E501
if padding_mode != 'zeros':
raise NotImplementedError("Only zero-padding is supported!")
if input.dtype != torch.quint8:
raise NotImplementedError("Only torch.quint8 is supported for activation tensor!")
if weight.dtype != torch.qint8:
raise NotImplementedError("Only torch.qint8 is supported for weight tensor!")
if input.ndim != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
packed_params = torch.ops.quantized.conv3d_prepack(
weight, bias, stride, padding, dilation, groups)
return torch.ops.quantized.conv3d(input, packed_params, scale, zero_point)
def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
r"""Down/up samples the input to either the given :attr:`size` or the given
:attr:`scale_factor`
See :func:`torch.nn.functional.interpolate` for implementation details.
The input dimensions are interpreted in the form:
`mini-batch x channels x [optional depth] x [optional height] x width`.
.. note:: The input quantization parameters propagate to the output.
.. note:: Only 2D/3D input is supported for quantized inputs
.. note:: Only the following modes are supported for the quantized inputs:
- `bilinear`
- `nearest`
Args:
input (Tensor): the input tensor
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
output spatial size.
scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple.
mode (str): algorithm used for upsampling:
``'nearest'`` | ``'bilinear'``
align_corners (bool, optional): Geometrically, we consider the pixels of the
input and output as squares rather than points.
If set to ``True``, the input and output tensors are aligned by the
center points of their corner pixels, preserving the values at the corner pixels.
If set to ``False``, the input and output tensors are aligned by the corner
points of their corner pixels, and the interpolation uses edge value padding
for out-of-boundary values, making this operation *independent* of input size
when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
is ``'bilinear'``.
Default: ``False``
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.interpolate' must be quantized!")
return torch.nn.functional.interpolate(input, size, scale_factor, mode,
align_corners)
def linear(
input: Tensor, weight: Tensor, bias: Optional[Tensor] = None,
scale: Optional[float] = None, zero_point: Optional[int] = None
) -> Tensor:
r"""
Applies a linear transformation to the incoming quantized data:
:math:`y = xA^T + b`.
See :class:`~torch.nn.quantized.Linear`
.. note::
        Current implementation packs weights on every call, which has a performance penalty.
If you want to avoid the overhead, use :class:`~torch.nn.quantized.Linear`.
Args:
input (Tensor): Quantized input of type `torch.quint8`
weight (Tensor): Quantized weight of type `torch.qint8`
bias (Tensor): None or fp32 bias of type `torch.float`
scale (double): output scale. If None, derived from the input scale
zero_point (long): output zero point. If None, derived from the input zero_point
Shape:
- Input: :math:`(N, *, in\_features)` where `*` means any number of
additional dimensions
- Weight: :math:`(out\_features, in\_features)`
- Bias: :math:`(out\_features)`
- Output: :math:`(N, *, out\_features)`
"""
if scale is None:
scale = input.q_scale()
if zero_point is None:
zero_point = input.q_zero_point()
_packed_params = torch.ops.quantized.linear_prepack(weight, bias)
return torch.ops.quantized.linear(input, _packed_params, scale, zero_point)
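# Illustrative usage sketch added for this document (the helper name below is
# ours): quantize a float input and weight, then call the functional quantized
# linear defined above. This mirrors the doctest-style examples given for
# conv1d/conv2d in this file.
def _example_quantized_linear_usage():
    inputs = torch.randn(3, 4, dtype=torch.float)
    weight = torch.randn(2, 4, dtype=torch.float)
    bias = torch.randn(2, dtype=torch.float)
    q_inputs = torch.quantize_per_tensor(inputs, 0.1, 0, torch.quint8)
    q_weight = torch.quantize_per_tensor(weight, 0.1, 0, torch.qint8)
    # scale/zero_point of the output default to those of the input
    return linear(q_inputs, q_weight, bias)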
def max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False):
r"""Applies a 1D max pooling over a quantized input signal composed of
several quantized input planes.
.. note:: The input quantization parameters are propagated to the output.
See :class:`~torch.nn.quantized.MaxPool1d` for details.
"""
if return_indices:
raise NotImplementedError("return_indices is not yet implemented!")
if stride is None:
stride = torch.jit.annotate(List[int], [])
return torch.nn.functional.max_pool1d(input, kernel_size, stride, padding,
dilation, ceil_mode=ceil_mode, return_indices=return_indices)
def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False):
r"""Applies a 2D max pooling over a quantized input signal composed of
several quantized input planes.
.. note:: The input quantization parameters are propagated to the output.
See :class:`~torch.nn.quantized.MaxPool2d` for details.
"""
if return_indices:
raise NotImplementedError("return_indices is not yet implemented!")
if stride is None:
stride = torch.jit.annotate(List[int], [])
return torch.nn.functional.max_pool2d(input, kernel_size, stride, padding,
dilation, ceil_mode=ceil_mode, return_indices=return_indices)
def celu(input: Tensor, scale: float, zero_point: int, alpha: float = 1.) -> Tensor:
r"""celu(input, scale, zero_point, alpha=1.) -> Tensor
Applies the quantized CELU function element-wise.
.. math::
\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x / \alpha) - 1))
Args:
input: quantized input
alpha: the :math:`\alpha` value for the CELU formulation. Default: 1.0
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.celu' must be quantized!")
return torch.ops.quantized.celu(input, scale, zero_point, alpha)
def leaky_relu(input: Tensor, negative_slope: float = 0.01, inplace: bool = False,
scale: Optional[float] = None, zero_point: Optional[int] = None):
r"""
    Quantized version of :func:`~torch.nn.functional.leaky_relu`.
leaky_relu(input, negative_slope=0.01, inplace=False, scale, zero_point) -> Tensor
Applies element-wise,
:math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)`
Args:
        input: Quantized input
negative_slope: The slope of the negative input
inplace: Inplace modification of the input tensor
scale, zero_point: Scale and zero point of the output tensor.
See :class:`~torch.nn.LeakyReLU` for more details.
"""
if scale is not None and zero_point is not None:
assert not inplace, "Cannot rescale with `inplace`"
output = torch._empty_affine_quantized(
input.shape, scale=scale, zero_point=int(zero_point), dtype=input.dtype)
torch._C._nn.leaky_relu(input, negative_slope, out=output)
return output
if inplace:
result = torch._C._nn.leaky_relu_(input, negative_slope)
else:
result = torch._C._nn.leaky_relu(input, negative_slope)
return result
def hardtanh(input: Tensor, min_val: float = -1., max_val: float = 1., inplace: bool = False) -> Tensor:
r"""This is the quantized version of :func:`~torch.nn.functional.hardtanh`.
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.hardtanh' must be quantized!")
if inplace:
return torch._C._nn.hardtanh_(input, min_val, max_val)
return torch._C._nn.hardtanh(input, min_val, max_val)
def hardswish(input: Tensor, scale: float, zero_point: int) -> Tensor:
r"""This is the quantized version of :func:`~torch.nn.functional.hardswish`.
Args:
input: quantized input
scale: quantization scale of the output tensor
zero_point: quantization zero point of the output tensor
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.hardswish' must be quantized!")
return torch._ops.ops.quantized.hardswish(input, scale, zero_point)
def threshold(input: Tensor, threshold: float, value: float) -> Tensor:
r"""Applies the quantized version of the threshold function element-wise:
.. math::
x = \begin{cases}
x & \text{if~} x > \text{threshold} \\
\text{value} & \text{otherwise}
\end{cases}
See :class:`~torch.nn.Threshold` for more details.
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.threshold' must be quantized!")
if threshold is None:
raise ValueError("Input to 'threshold' must be specified!")
if value is None:
raise ValueError("Input to 'value' must be specified!")
return torch._ops.ops.quantized.threshold(input, threshold, value)
def elu(input: Tensor, scale: float, zero_point: int, alpha: float = 1.) -> Tensor:
r"""This is the quantized version of :func:`~torch.nn.functional.elu`.
Args:
input: quantized input
scale: quantization scale of the output tensor
zero_point: quantization zero point of the output tensor
alpha: the alpha constant
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.elu' must be quantized!")
return torch.ops.quantized.elu(input, scale, zero_point, alpha)
def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor:
r"""This is the quantized version of :func:`~torch.nn.functional.hardsigmoid`.
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.hardsigmoid' must be quantized!")
if inplace:
return torch._C._nn.hardsigmoid_(input) # type: ignore[attr-defined]
return torch._C._nn.hardsigmoid(input)
def clamp(input: Tensor, min_: float, max_: float) -> Tensor:
r"""float(input, min\_, max\_) -> Tensor
Applies the clamp function element-wise.
See :class:`~torch.nn.quantized.clamp` for more details.
Args:
input: quantized input
min_: minimum value for clamping
max_: maximum value for clamping
"""
if not input.is_quantized:
raise ValueError("Input to 'quantized.clamp' must be quantized!")
return torch.clamp(input, min_, max_)
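# Illustrative usage sketch added for this document (the helper name below is
# ours): the element-wise ops above (celu, hardswish, clamp, ...) all expect an
# already-quantized tensor and, where applicable, explicit output quantization
# parameters.
def _example_quantized_activation_usage():
    x = torch.quantize_per_tensor(torch.randn(2, 3), 0.05, 0, torch.quint8)
    y = hardswish(x, scale=0.05, zero_point=0)  # quantized hardswish
    z = clamp(x, -1.0, 1.0)                     # quantized clamp
    return y, z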
def upsample(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
r"""Upsamples the input to either the given :attr:`size` or the given
:attr:`scale_factor`
.. warning::
This function is deprecated in favor of
:func:`torch.nn.quantized.functional.interpolate`.
        This is equivalent to ``nn.quantized.functional.interpolate(...)``.
See :func:`torch.nn.functional.interpolate` for implementation details.
The input dimensions are interpreted in the form:
`mini-batch x channels x [optional depth] x [optional height] x width`.
.. note:: The input quantization parameters propagate to the output.
.. note:: Only 2D input is supported for quantized inputs
.. note:: Only the following modes are supported for the quantized inputs:
- `bilinear`
- `nearest`
Args:
input (Tensor): quantized input tensor
size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]):
output spatial size.
        scale_factor (int or Tuple[int]): multiplier for spatial size. Has to be an integer.
mode (str): algorithm used for upsampling:
``'nearest'`` | ``'bilinear'``
align_corners (bool, optional): Geometrically, we consider the pixels of the
input and output as squares rather than points.
If set to ``True``, the input and output tensors are aligned by the
center points of their corner pixels, preserving the values at the corner pixels.
If set to ``False``, the input and output tensors are aligned by the corner
points of their corner pixels, and the interpolation uses edge value padding
for out-of-boundary values, making this operation *independent* of input size
when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode`
is ``'bilinear'``.
Default: ``False``
.. warning::
With ``align_corners = True``, the linearly interpolating modes
(`bilinear`) don't proportionally align the
output and input pixels, and thus the output values can depend on the
input size. This was the default behavior for these modes up to version
0.3.1. Since then, the default behavior is ``align_corners = False``.
See :class:`~torch.nn.Upsample` for concrete examples on how this
affects the outputs.
"""
warnings.warn("nn.quantized.functional.upsample is deprecated. Use nn.quantized.functional.interpolate instead.")
return interpolate(input, size, scale_factor, mode, align_corners)
def upsample_bilinear(input, size=None, scale_factor=None):
r"""Upsamples the input, using bilinear upsampling.
.. warning::
This function is deprecated in favor of
:func:`torch.nn.quantized.functional.interpolate`.
        This is equivalent to
``nn.quantized.functional.interpolate(..., mode='bilinear', align_corners=True)``.
.. note:: The input quantization parameters propagate to the output.
.. note:: Only 2D inputs are supported
Args:
input (Tensor): quantized input
size (int or Tuple[int, int]): output spatial size.
scale_factor (int or Tuple[int, int]): multiplier for spatial size
"""
# DeprecationWarning is ignored by default
warnings.warn("nn.quantized.functional.upsample_bilinear is deprecated. Use nn.quantized.functional.interpolate instead.")
return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True)
def upsample_nearest(input, size=None, scale_factor=None):
r"""Upsamples the input, using nearest neighbours' pixel values.
.. warning::
This function is deprecated in favor of
:func:`torch.nn.quantized.functional.interpolate`.
        This is equivalent to ``nn.quantized.functional.interpolate(..., mode='nearest')``.
.. note:: The input quantization parameters propagate to the output.
.. note:: Only 2D inputs are supported
Args:
input (Tensor): quantized input
size (int or Tuple[int, int] or Tuple[int, int, int]): output spatial
size.
scale_factor (int): multiplier for spatial size. Has to be an integer.
"""
# DeprecationWarning is ignored by default
warnings.warn("nn.quantized.functional.upsample_nearest is deprecated. Use nn.quantized.functional.interpolate instead.")
return interpolate(input, size, scale_factor, mode='nearest')
|
pytorch-master
|
torch/nn/quantized/functional.py
|
from .modules import * # noqa: F403
|
pytorch-master
|
torch/nn/quantized/_reference/__init__.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Dict, Any
from .utils import ReferenceQuantizedModule
class Linear(nn.Linear, ReferenceQuantizedModule):
""" A reference quantized linear module that fits into the FX
Graph Mode Quantization workflow
activation will be floating point Tensor, we will store floating
point weight as well in the module, but in forward we'll quantize
and dequantize the weight before running the floating point functional
linear operator.
"""
_IS_REFERENCE = True
def __init__(
self,
in_features: int,
out_features: int,
bias_: bool = True,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
weight_qparams: Optional[Dict[str, Any]] = None):
super().__init__(in_features, out_features, bias_, device, dtype)
self._init_weight_qparams(weight_qparams, device)
def _get_name(self):
return "QuantizedLinear(Reference)"
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
we have:
w(float) -- quant - dequant \
x(float) ------------- F.linear ---
In the full model, we will see
w(float) -- quant - *dequant \
x -- quant --- *dequant -- *F.linear --- *quant - dequant
and the backend should be able to fuse the ops with `*` into a quantized linear
"""
weight_quant_dequant = self.get_weight()
result = F.linear(x, weight_quant_dequant, self.bias)
return result
@classmethod
def from_float(cls, float_linear, weight_qparams):
qref_linear = Linear(
float_linear.in_features, float_linear.out_features,
float_linear.bias is not None, device=float_linear.weight.device,
dtype=float_linear.weight.dtype, weight_qparams=weight_qparams)
qref_linear.weight = torch.nn.Parameter(float_linear.weight.detach())
if float_linear.bias is not None:
qref_linear.bias = torch.nn.Parameter(float_linear.bias.detach())
return qref_linear
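# Illustrative usage sketch added for this document (the helper name below is
# ours, not part of the original file): build a reference quantized Linear
# from a float module using per-tensor affine weight qparams.
def _example_reference_linear_usage():
    float_linear = nn.Linear(4, 2)
    weight_qparams = {
        "qscheme": torch.per_tensor_affine,
        "dtype": torch.qint8,
        "scale": 0.1,
        "zero_point": 0,
    }
    ref_linear = Linear.from_float(float_linear, weight_qparams)
    # forward quantizes + dequantizes the weight, then runs F.linear in float
    return ref_linear(torch.randn(3, 4))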
|
pytorch-master
|
torch/nn/quantized/_reference/modules/linear.py
|
from .linear import Linear
from .conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .rnn import RNNCell, LSTMCell, GRUCell, LSTM
from .sparse import Embedding, EmbeddingBag
__all__ = [
'Linear',
'Conv1d',
'Conv2d',
'Conv3d',
'ConvTranspose1d',
'ConvTranspose2d',
'ConvTranspose3d',
'RNNCell',
'LSTMCell',
'GRUCell',
'LSTM',
'Embedding',
'EmbeddingBag',
]
|
pytorch-master
|
torch/nn/quantized/_reference/modules/__init__.py
|
import torch
from typing import Dict, Any
class ReferenceQuantizedModule(torch.nn.Module):
def _init_weight_qparams(self, weight_qparams, device):
if weight_qparams is None:
weight_qparams = {
"qscheme": torch.per_tensor_affine,
"dtype": torch.quint8,
"scale": 1.0,
"zero_point": 0
}
self.weight_qscheme: torch.qscheme = weight_qparams["qscheme"]
self.weight_dtype = weight_qparams["dtype"]
        assert self.weight_qscheme in [
            None, torch.per_tensor_affine, torch.per_channel_affine,
            torch.per_channel_affine_float_qparams], \
            f"qscheme: {self.weight_qscheme} is not supported in reference quantized {self._get_name()}"
if self.weight_dtype in [torch.quint8, torch.qint8, torch.quint4x2, torch.qint32]:
zero_point_dtype = weight_qparams["zero_point"].dtype if \
isinstance(weight_qparams["zero_point"], torch.Tensor) else \
torch.int
w_scale = weight_qparams["scale"]
w_scale_tensor = w_scale.clone().detach() \
if isinstance(w_scale, torch.Tensor) \
else torch.tensor(w_scale, dtype=torch.float, device=device)
self.register_buffer("weight_scale", w_scale_tensor)
w_zp = weight_qparams["zero_point"]
w_zp_tensor = w_zp.clone().detach() \
if isinstance(w_zp, torch.Tensor) \
else torch.tensor(w_zp, dtype=zero_point_dtype, device=device)
self.register_buffer("weight_zero_point", w_zp_tensor)
if self.weight_qscheme in [torch.per_channel_affine, torch.per_channel_affine_float_qparams]:
w_axis = weight_qparams["axis"]
w_axis_tensor = w_axis.clone().detach() \
if isinstance(w_axis, torch.Tensor) \
else torch.tensor(w_axis, dtype=torch.int, device=device)
self.register_buffer("weight_axis", w_axis_tensor)
else:
# added for TorchScriptability, not used
self.register_buffer(
"weight_axis", torch.tensor(0, dtype=torch.int, device=device))
else:
# added for TorchScriptability, and for torch.float
self.register_buffer("weight_scale", torch.tensor(1.0, dtype=torch.float, device=device))
self.register_buffer("weight_zero_point", torch.tensor(0, dtype=torch.int, device=device))
self.register_buffer(
"weight_axis", torch.tensor(0, dtype=torch.int, device=device))
def get_weight(self):
"""
Fake quantize (quantize and dequantize) the weight with
the quantization parameters for weight, this is used to
simulate the numerics for the quantized weight in a quantized
model
"""
# suppress mypy warning
assert isinstance(self.weight_scale, torch.Tensor)
assert isinstance(self.weight_zero_point, torch.Tensor)
assert isinstance(self.weight_axis, torch.Tensor)
return _quantize_and_dequantize_weight(
self.weight, # type: ignore[arg-type]
self.weight_qscheme,
self.weight_dtype,
self.weight_scale,
self.weight_zero_point, self.weight_axis)
def get_quantized_weight(self):
# suppress mypy warning
assert isinstance(self.weight_scale, torch.Tensor)
assert isinstance(self.weight_zero_point, torch.Tensor)
assert isinstance(self.weight_axis, torch.Tensor)
return _quantize_weight(
self.weight, # type: ignore[arg-type]
self.weight_qscheme,
self.weight_dtype,
self.weight_scale,
self.weight_zero_point,
self.weight_axis)
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
_save_weight_qparams(
destination, prefix, self.weight_qscheme, self.weight_dtype,
self.weight_scale, self.weight_zero_point, self.weight_axis)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
for key in _get_weight_qparam_keys(state_dict, prefix):
setattr(self, key, state_dict[prefix + key])
state_dict.pop(prefix + key)
super()._load_from_state_dict(
state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
def _quantize_weight(
weight: torch.Tensor,
weight_qscheme: torch.qscheme,
weight_dtype: torch.dtype,
weight_scale: torch.Tensor,
weight_zero_point: torch.Tensor,
weight_axis: torch.Tensor):
if weight_dtype == torch.float16:
weight = weight.to(weight_dtype)
return weight
if weight_qscheme == torch.per_tensor_affine:
if weight_dtype in [torch.quint8, torch.qint8, torch.qint32]:
weight = torch.quantize_per_tensor(weight, weight_scale, weight_zero_point, weight_dtype)
return weight
elif weight_qscheme in [torch.per_channel_affine, torch.per_channel_affine_float_qparams]:
if weight_dtype in [torch.quint8, torch.qint8, torch.quint4x2, torch.qint32]:
weight = torch.quantize_per_channel(
weight, weight_scale,
weight_zero_point, weight_axis.item(), weight_dtype) # type: ignore[arg-type]
return weight
raise Exception(f"Unsupported dtype and qscheme: {weight_dtype}, {weight_qscheme}")
def _quantize_and_dequantize_weight(
weight: torch.Tensor,
weight_qscheme: torch.qscheme,
weight_dtype: torch.dtype,
weight_scale: torch.Tensor,
weight_zero_point: torch.Tensor,
weight_axis: torch.Tensor):
""" Quantize and then dequantize the weight based on
the quantization parameters
"""
if weight_qscheme in [
torch.per_tensor_affine,
torch.per_channel_affine,
torch.per_channel_affine_float_qparams]:
weight_quant = _quantize_weight(
weight, weight_qscheme, weight_dtype, weight_scale, weight_zero_point, weight_axis)
weight_dequant = weight_quant.dequantize()
else:
weight_dequant = weight
return weight_dequant
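# Illustrative sketch added for this document (the helper name below is ours):
# fake-quantize a weight tensor directly with the helper above, using
# per-tensor affine quantization parameters.
def _example_quantize_dequantize_weight():
    weight = torch.randn(4, 3)
    return _quantize_and_dequantize_weight(
        weight,
        torch.per_tensor_affine,
        torch.qint8,
        torch.tensor(0.1),
        torch.tensor(0, dtype=torch.int),
        torch.tensor(0, dtype=torch.int),  # axis is ignored for per-tensor qschemes
    )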
def _save_weight_qparams(destination, prefix, weight_qscheme, weight_dtype, weight_scale, weight_zero_point, weight_axis):
destination[prefix + "weight_qscheme"] = weight_qscheme
destination[prefix + "weight_dtype"] = weight_dtype
if weight_qscheme is not None:
destination[prefix + "weight_scale"] = weight_scale
destination[prefix + "weight_zero_point"] = weight_zero_point
if weight_qscheme == torch.per_channel_affine:
destination[prefix + "weight_axis"] = weight_axis
def _get_weight_qparam_keys(
state_dict: Dict[str, Any],
prefix: str):
keys = ["weight_qscheme", "weight_dtype"]
weight_qscheme = state_dict[prefix + "weight_qscheme"]
if weight_qscheme is not None:
keys.append("weight_scale")
keys.append("weight_zero_point")
    if weight_qscheme == torch.per_channel_affine:
keys.append("weight_axis")
return keys
|
pytorch-master
|
torch/nn/quantized/_reference/modules/utils.py
|
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .utils import ReferenceQuantizedModule
from typing import Optional, Dict, Any
class Embedding(nn.Embedding, ReferenceQuantizedModule):
""" A reference quantized Embedding module that fits into the
FX Graph Mode Quantization workflow, activation will be floating point Tensor,
we will store floating point weight as well in the module, but in forward we'll
quantize and dequantize the weight before running the floating point functional
embedding operator.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
sparse: bool = False, _weight: Optional[Tensor] = None,
device=None, dtype=None,
weight_qparams: Optional[Dict[str, Any]] = None) -> None:
super().__init__(num_embeddings, embedding_dim, padding_idx, max_norm,
norm_type, scale_grad_by_freq, sparse, _weight, device, dtype)
self._init_weight_qparams(weight_qparams, device)
def _get_name(self):
return "QuantizedEmbedding(Reference)"
def forward(self, input: Tensor) -> Tensor:
weight_quant_dequant = self.get_weight()
return F.embedding(
input, weight_quant_dequant, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse)
@classmethod
def from_float(cls, mod, weight_qparams):
return cls(
mod.num_embeddings,
mod.embedding_dim,
mod.padding_idx,
mod.max_norm,
mod.norm_type,
mod.scale_grad_by_freq,
mod.sparse,
mod.weight,
mod.weight.device,
mod.weight.dtype,
weight_qparams)
class EmbeddingBag(nn.EmbeddingBag, ReferenceQuantizedModule):
""" A reference quantized EmbeddingBag module that fits into the
FX Graph Mode Quantization workflow, activation will be floating point Tensor,
we will store floating point weight as well in the module, but in forward we'll
quantize and dequantize the weight before running the floating point functional
embedding operator.
"""
def __init__(self, num_embeddings: int, embedding_dim: int,
max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
mode: str = 'mean', sparse: bool = False, _weight: Optional[Tensor] = None,
include_last_offset: bool = False, padding_idx: Optional[int] = None,
device=None, dtype=None,
weight_qparams: Optional[Dict[str, Any]] = None) -> None:
super().__init__(num_embeddings, embedding_dim, max_norm, norm_type,
scale_grad_by_freq, mode, sparse, _weight, include_last_offset,
padding_idx, device, dtype)
self._init_weight_qparams(weight_qparams, device)
def _get_name(self):
return "QuantizedEmbedding(Reference)"
def forward(self, input: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None) -> Tensor:
weight_quant_dequant = self.get_weight()
return F.embedding_bag(input, weight_quant_dequant, offsets,
self.max_norm, self.norm_type,
self.scale_grad_by_freq, self.mode, self.sparse,
per_sample_weights, self.include_last_offset,
self.padding_idx)
@classmethod
def from_float(cls, mod, weight_qparams):
return cls(
mod.num_embeddings,
mod.embedding_dim,
mod.max_norm,
mod.norm_type,
mod.scale_grad_by_freq,
mod.mode,
mod.sparse,
mod.weight,
mod.include_last_offset,
mod.padding_idx,
mod.weight.device,
mod.weight.dtype,
weight_qparams
)
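# Illustrative usage sketch added for this document (the helper name below is
# ours): build a reference quantized Embedding from a float module. Per-tensor
# affine weight qparams are used here purely for simplicity.
def _example_reference_embedding_usage():
    import torch

    float_emb = nn.Embedding(10, 4)
    weight_qparams = {
        "qscheme": torch.per_tensor_affine,
        "dtype": torch.quint8,
        "scale": 0.1,
        "zero_point": 0,
    }
    ref_emb = Embedding.from_float(float_emb, weight_qparams)
    return ref_emb(torch.tensor([[1, 2, 3]]))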
|
pytorch-master
|
torch/nn/quantized/_reference/modules/sparse.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Dict, Any, List
from torch.nn.common_types import _size_1_t
from .utils import ReferenceQuantizedModule
class _ConvNd(torch.nn.modules.conv._ConvNd, ReferenceQuantizedModule):
""" A reference version of nn.quantized.Conv2d
we will not pack the parameters in this module, since weight packing is an
optimization for quantized backends supported in PyTorch (fbgemm/qnnpack),
this is useful when user want to use this module in other backends like Glow.
"""
__annotations__ = {"bias": Optional[torch.Tensor]}
_IS_REFERENCE = True
@staticmethod
def from_float(cls, float_conv, weight_qparams):
qref_conv = cls(
float_conv.in_channels,
float_conv.out_channels,
float_conv.kernel_size, # type: ignore[arg-type]
float_conv.stride, # type: ignore[arg-type]
float_conv.padding, # type: ignore[arg-type]
float_conv.dilation, # type: ignore[arg-type]
float_conv.groups,
float_conv.bias is not None, # type: ignore[arg-type]
float_conv.padding_mode,
device=float_conv.weight.device,
dtype=float_conv.weight.dtype,
weight_qparams=weight_qparams)
qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach())
if float_conv.bias is not None:
qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach())
return qref_conv
class Conv1d(_ConvNd, nn.Conv1d):
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = "zeros",
device=None,
dtype=None,
weight_qparams: Optional[Dict[str, Any]] = None):
nn.Conv1d.__init__(
self, in_channels, out_channels, kernel_size, stride, padding, dilation,
groups, bias, padding_mode, device, dtype)
self._init_weight_qparams(weight_qparams, device)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
we have:
w(float) -- quant - dequant \
x(float) ------------- F.conv1d ---
In the full model, we will see
w(float) -- quant - *dequant \
x -- quant --- *dequant -- *F.conv1d --- *quant - dequant
and the backend should be able to fuse the ops with `*` into a quantized conv1d
"""
weight_quant_dequant = self.get_weight()
result = F.conv1d(
x, weight_quant_dequant, self.bias, self.stride,
self.padding, self.dilation, self.groups)
return result
def _get_name(self):
return "QuantizedConv1d(Reference)"
@classmethod
def from_float(cls, float_conv, weight_qparams):
return _ConvNd.from_float(cls, float_conv, weight_qparams)
class Conv2d(_ConvNd, nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros',
device=None,
dtype=None,
weight_qparams: Optional[Dict[str, Any]] = None):
nn.Conv2d.__init__(
self, in_channels, out_channels, kernel_size, stride, padding, dilation,
groups, bias, padding_mode, device, dtype)
self._init_weight_qparams(weight_qparams, device)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
we have:
w(float) -- quant - dequant \
x(float) ------------- F.conv2d ---
In the full model, we will see
w(float) -- quant - *dequant \
x -- quant --- *dequant -- *F.conv2d --- *quant - dequant
and the backend should be able to fuse the ops with `*` into a quantized conv2d
"""
weight_quant_dequant = self.get_weight()
result = F.conv2d(
x, weight_quant_dequant, self.bias, self.stride,
self.padding, self.dilation, self.groups)
return result
def _get_name(self):
return "QuantizedConv2d(Reference)"
@classmethod
def from_float(cls, float_conv, weight_qparams):
return _ConvNd.from_float(cls, float_conv, weight_qparams)
class Conv3d(_ConvNd, nn.Conv3d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode="zeros",
device=None,
dtype=None,
weight_qparams: Optional[Dict[str, Any]] = None):
nn.Conv3d.__init__(
self, in_channels, out_channels, kernel_size, stride, padding, dilation,
groups, bias, padding_mode, device, dtype)
self._init_weight_qparams(weight_qparams, device)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
we have:
w(float) -- quant - dequant \
x(float) ------------- F.conv3d ---
In the full model, we will see
w(float) -- quant - *dequant \
x -- quant --- *dequant -- *F.conv3d --- *quant - dequant
and the backend should be able to fuse the ops with `*` into a quantized conv3d
"""
weight_quant_dequant = self.get_weight()
result = F.conv3d(
x, weight_quant_dequant, self.bias, self.stride,
self.padding, self.dilation, self.groups)
return result
def _get_name(self):
return "QuantizedConv3d(Reference)"
@classmethod
def from_float(cls, float_conv, weight_qparams):
return _ConvNd.from_float(cls, float_conv, weight_qparams)
class _ConvTransposeNd(_ConvNd, torch.nn.modules.conv._ConvTransposeNd):
""" A reference version of nn.quantized.ConvTranspose2d
we will not pack the parameters in this module, since weight packing is an
optimization for quantized backends supported in PyTorch (fbgemm/qnnpack),
this is useful when user want to use this module in other backends like Glow.
"""
@staticmethod
def from_float(cls, float_conv, weight_qparams):
qref_conv = cls(
float_conv.in_channels,
float_conv.out_channels,
float_conv.kernel_size, # type: ignore[arg-type]
float_conv.stride, # type: ignore[arg-type]
float_conv.padding, # type: ignore[arg-type]
float_conv.output_padding, # type: ignore[arg-type]
float_conv.groups,
float_conv.bias is not None, # type: ignore[arg-type]
float_conv.dilation, # type: ignore[arg-type]
float_conv.padding_mode,
device=float_conv.weight.device,
dtype=float_conv.weight.dtype,
weight_qparams=weight_qparams)
qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach())
if float_conv.bias is not None:
qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach())
return qref_conv
class ConvTranspose1d(_ConvTransposeNd, nn.ConvTranspose1d):
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
output_padding: _size_1_t = 0,
groups: int = 1,
bias: bool = True,
dilation: _size_1_t = 1,
padding_mode: str = "zeros",
device=None,
dtype=None,
weight_qparams: Optional[Dict[str, Any]] = None):
nn.ConvTranspose1d.__init__(
self, in_channels, out_channels, kernel_size, stride, padding, output_padding,
groups, bias, dilation, padding_mode, device, dtype)
self._init_weight_qparams(weight_qparams, device)
def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor:
"""
we have:
w(float) -- quant - dequant \
x(float) ------------- F.convTranspose1d ---
In the full model, we will see
w(float) -- quant - *dequant \
x -- quant --- *dequant -- *F.convTranspose1d --- *quant - dequant
and the backend should be able to fuse the ops with `*` into a quantized conv1d
"""
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        output_padding = self._output_padding(
            x, output_size, self.stride, self.padding, self.kernel_size, self.dilation)  # type: ignore[arg-type]
weight_quant_dequant = self.get_weight()
result = F.conv_transpose1d(
x, weight_quant_dequant, self.bias, self.stride,
self.padding, output_padding, self.groups, self.dilation)
return result
def _get_name(self):
return "QuantizedConvTranspose1d(Reference)"
@classmethod
def from_float(cls, float_conv, weight_qparams):
return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams)
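# Hedged illustration (not part of the original module): the analogous conversion for a
# transposed conv. The weight_qparams values are placeholders; the output_size argument
# shows how the caller can resolve the ambiguity in the transposed output length.
def _example_conv_transpose1d_reference_sketch():
    float_deconv = nn.ConvTranspose1d(8, 4, kernel_size=3, stride=2, padding=1)
    weight_qparams = {
        "qscheme": torch.per_tensor_affine,
        "dtype": torch.qint8,
        "scale": 0.05,
        "zero_point": 0,
    }
    ref_deconv = ConvTranspose1d.from_float(float_deconv, weight_qparams)
    x = torch.randn(1, 8, 10)
    # with stride=2, padding=1, kernel=3 the valid output lengths are 19 or 20
    return ref_deconv(x, output_size=[20])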
class ConvTranspose2d(_ConvTransposeNd, nn.ConvTranspose2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0,
groups=1, bias=True, dilation=1,
padding_mode='zeros',
device=None,
dtype=None,
weight_qparams: Optional[Dict[str, Any]] = None):
nn.ConvTranspose2d.__init__(
self, in_channels, out_channels, kernel_size, stride, padding, output_padding,
groups, bias, dilation, padding_mode, device, dtype)
self._init_weight_qparams(weight_qparams, device)
def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor:
"""
we have:
w(float) -- quant - dequant \
x(float) ------------- F.convTranspose2d ---
In the full model, we will see
w(float) -- quant - *dequant \
x -- quant --- *dequant -- *F.convTranspose2d --- *quant - dequant
and the backend should be able to fuse the ops with `*` into a quantized conv2d
"""
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        output_padding = self._output_padding(
            x, output_size, self.stride, self.padding, self.kernel_size, self.dilation)  # type: ignore[arg-type]
weight_quant_dequant = self.get_weight()
result = F.conv_transpose2d(
x, weight_quant_dequant, self.bias, self.stride,
self.padding, output_padding, self.groups, self.dilation)
return result
def _get_name(self):
return "QuantizedConvTranspose2d(Reference)"
@classmethod
def from_float(cls, float_conv, weight_qparams):
return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams)
class ConvTranspose3d(_ConvTransposeNd, nn.ConvTranspose3d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0,
groups=1, bias=True, dilation=1,
padding_mode="zeros",
device=None,
dtype=None,
weight_qparams: Optional[Dict[str, Any]] = None):
nn.ConvTranspose3d.__init__(
self, in_channels, out_channels, kernel_size, stride, padding, output_padding,
groups, bias, dilation, padding_mode, device, dtype)
self._init_weight_qparams(weight_qparams, device)
def forward(self, x: torch.Tensor, output_size: Optional[List[int]] = None) -> torch.Tensor:
"""
we have:
w(float) -- quant - dequant \
x(float) ------------- F.convTranspose3d ---
In the full model, we will see
w(float) -- quant - *dequant \
x -- quant --- *dequant -- *F.convTranspose3d --- *quant - dequant
and the backend should be able to fuse the ops with `*` into a quantized conv3d
"""
assert isinstance(self.padding, tuple)
# One cannot replace List by Tuple or Sequence in "_output_padding" because
# TorchScript does not support `Sequence[T]` or `Tuple[T, ...]`.
        output_padding = self._output_padding(
            x, output_size, self.stride, self.padding, self.kernel_size, self.dilation)  # type: ignore[arg-type]
weight_quant_dequant = self.get_weight()
result = F.conv_transpose3d(
x, weight_quant_dequant, self.bias, self.stride,
self.padding, output_padding, self.groups, self.dilation)
return result
def _get_name(self):
return "QuantizedConvTranspose3d(Reference)"
@classmethod
def from_float(cls, float_conv, weight_qparams):
return _ConvTransposeNd.from_float(cls, float_conv, weight_qparams)
|
pytorch-master
|
torch/nn/quantized/_reference/modules/conv.py
|
import torch
import torch.nn as nn
from torch import Tensor
from .utils import _quantize_and_dequantize_weight
from .utils import _quantize_weight
from typing import Optional, Dict, Any, Tuple
from torch import _VF
from torch.nn.utils.rnn import PackedSequence
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
return tensor.index_select(dim, permutation)
def get_weight_and_quantization_params(module, wn):
weight = getattr(module, wn)
params = [weight]
for param_name in [wn + n for n in ["_qscheme", "_dtype", "_scale", "_zero_point", "_axis"]]:
if hasattr(module, param_name):
param = getattr(module, param_name)
else:
param = None
params.append(param)
return params
def get_quantized_weight(module, wn):
if not hasattr(module, wn):
return None
params = get_weight_and_quantization_params(module, wn)
weight = _quantize_weight(*params)
return weight
def get_quantize_and_dequantized_weight(module, wn):
if not hasattr(module, wn):
return None
params = get_weight_and_quantization_params(module, wn)
weight = _quantize_and_dequantize_weight(*params)
return weight
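# Hedged illustration (not part of the original file): the helpers above look for
# "<weight_name>_qscheme/_dtype/_scale/_zero_point/_axis" attributes on the module;
# the reference RNN modules below register these in _init_weight_qparams_dict.
# The attribute values here are placeholders.
def _example_get_quantized_weight_sketch():
    m = torch.nn.Module()
    m.weight_ih = torch.randn(20, 10)
    m.weight_ih_qscheme = torch.per_tensor_affine
    m.weight_ih_dtype = torch.quint8
    m.weight_ih_scale = torch.tensor(1.0)
    m.weight_ih_zero_point = torch.tensor(0, dtype=torch.int)
    # returns a per-tensor quantized copy of weight_ih
    return get_quantized_weight(m, "weight_ih")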
class RNNCellBase(nn.RNNCellBase):
def __init__(self, input_size: int, hidden_size: int, bias: bool, num_chunks: int,
device=None, dtype=None, weight_qparams_dict=None) -> None:
super().__init__(input_size, hidden_size, bias, num_chunks, device=device, dtype=dtype)
if weight_qparams_dict is None:
weight_qparams = {
"qscheme": torch.per_tensor_affine,
"dtype": torch.quint8,
"scale": 1.0,
"zero_point": 0
}
weight_qparams_dict = {
"weight_ih": weight_qparams,
"weight_hh": weight_qparams
}
assert len(weight_qparams_dict) == 2, "Expected length for weight_qparams_dict to be 2 for QuantizedRNNCellBase(Reference)"
self._init_weight_qparams_dict(weight_qparams_dict, device)
def _init_weight_qparams_dict(self, weight_qparams_dict, device):
assert weight_qparams_dict is not None
for key, weight_qparams in weight_qparams_dict.items():
# TODO: refactor the duplicated code to utils.py
weight_qscheme = weight_qparams["qscheme"]
weight_dtype = weight_qparams["dtype"]
setattr(self, key + "_qscheme", weight_qscheme)
setattr(self, key + "_dtype", weight_dtype)
            assert weight_qscheme in [None, torch.per_tensor_affine, torch.per_channel_affine], \
                Exception(f"qscheme: {weight_qscheme} is not supported in {self._get_name()}")
if weight_qscheme is not None:
scale = weight_qparams["scale"]
scale_tensor = scale.clone().detach() \
if isinstance(scale, torch.Tensor) else \
torch.tensor(scale, dtype=torch.float, device=device)
self.register_buffer(key + "_scale", scale_tensor)
zp = weight_qparams["zero_point"]
zp_tensor = zp.clone().detach() \
if isinstance(zp, torch.Tensor) else \
torch.tensor(zp, dtype=torch.int, device=device)
self.register_buffer(key + "_zero_point", zp_tensor)
if weight_qscheme == torch.per_channel_affine:
axis = weight_qparams["axis"]
axis_tensor = axis.clone().detach() \
if isinstance(axis, torch.Tensor) else \
torch.tensor(axis, dtype=torch.int, device=device)
self.register_buffer(key + "_axis", axis_tensor)
else:
# added for TorchScriptability, not used
self.register_buffer(
key + "_axis", torch.tensor(0, dtype=torch.int, device=device))
def _get_name(self):
return "QuantizedRNNCellBase(Reference)"
def get_quantized_weight_ih(self):
return get_quantized_weight(self, "weight_ih")
def get_quantized_weight_hh(self):
return get_quantized_weight(self, "weight_hh")
def get_weight_ih(self):
return get_quantize_and_dequantized_weight(self, "weight_ih")
def get_weight_hh(self):
return get_quantize_and_dequantized_weight(self, "weight_hh")
class RNNCell(RNNCellBase):
"""
    We store weight_qparams for all the weights (weight_ih and weight_hh), so we
    need to pass in a `weight_qparams_dict` that maps from a weight name,
    e.g. weight_ih, to the weight_qparams for that weight.
"""
def __init__(self, input_size: int, hidden_size: int, bias: bool = True, nonlinearity: str = "tanh",
device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Dict[str, Any]]] = None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict}
super().__init__(input_size, hidden_size, bias, num_chunks=1, **factory_kwargs)
self.nonlinearity = nonlinearity
def _get_name(self):
return "QuantizedRNNCell(Reference)"
# TODO: refactor nn.RNNCell to have a _forward that takes weight_ih and weight_hh as input
# and remove duplicated code, same for the other two Cell modules
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
assert input.dim() in (1, 2), \
f"RNNCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
is_batched = input.dim() == 2
if not is_batched:
input = input.unsqueeze(0)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
else:
hx = hx.unsqueeze(0) if not is_batched else hx
if self.nonlinearity == "tanh":
ret = _VF.rnn_tanh_cell(
input, hx,
self.get_weight_ih(), self.get_weight_hh(),
self.bias_ih, self.bias_hh,
)
elif self.nonlinearity == "relu":
ret = _VF.rnn_relu_cell(
input, hx,
self.get_weight_ih(), self.get_weight_hh(),
self.bias_ih, self.bias_hh,
)
else:
ret = input # TODO: remove when jit supports exception flow
raise RuntimeError(
"Unknown nonlinearity: {}".format(self.nonlinearity))
if not is_batched:
ret = ret.squeeze(0)
return ret
@classmethod
def from_float(cls, mod, weight_qparams_dict):
ref_mod = cls(
mod.input_size,
mod.hidden_size,
mod.bias,
mod.nonlinearity,
mod.weight_ih.device,
mod.weight_ih.dtype,
weight_qparams_dict)
ref_mod.weight_ih = mod.weight_ih
ref_mod.weight_hh = mod.weight_hh
ref_mod.bias_ih = mod.bias_ih
ref_mod.bias_hh = mod.bias_hh
return ref_mod
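# Hedged illustration (not part of the original file): building the weight_qparams_dict
# that the RNNCell docstring describes and converting a float nn.RNNCell into the
# reference cell. Scale/zero_point values are placeholders.
def _example_rnn_cell_reference_sketch():
    float_cell = nn.RNNCell(10, 20)
    per_tensor_qparams = {
        "qscheme": torch.per_tensor_affine,
        "dtype": torch.quint8,
        "scale": 1.0,
        "zero_point": 0,
    }
    weight_qparams_dict = {
        "weight_ih": per_tensor_qparams,
        "weight_hh": per_tensor_qparams,
    }
    ref_cell = RNNCell.from_float(float_cell, weight_qparams_dict)
    x = torch.randn(3, 10)
    hx = torch.zeros(3, 20)
    # forward dequantizes the weights and dispatches to _VF.rnn_tanh_cell
    return ref_cell(x, hx)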
class LSTMCell(RNNCellBase):
"""
    We store weight_qparams for all the weights (weight_ih and weight_hh), so we
    need to pass in a `weight_qparams_dict` that maps from a weight name,
    e.g. weight_ih, to the weight_qparams for that weight.
"""
def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Dict[str, Any]]] = None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict}
super().__init__(input_size, hidden_size, bias, num_chunks=4, **factory_kwargs)
def _get_name(self):
return "QuantizedLSTMCell(Reference)"
def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
assert input.dim() in (1, 2), \
f"LSTMCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
is_batched = input.dim() == 2
if not is_batched:
input = input.unsqueeze(0)
if hx is None:
zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
hx = (zeros, zeros)
else:
hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx
ret = _VF.lstm_cell(
input, hx,
self.get_weight_ih(), self.get_weight_hh(),
self.bias_ih, self.bias_hh,
)
if not is_batched:
ret = (ret[0].squeeze(0), ret[1].squeeze(0))
return ret
@classmethod
def from_float(cls, mod, weight_qparams_dict):
ref_mod = cls(
mod.input_size,
mod.hidden_size,
mod.bias,
mod.weight_ih.device,
mod.weight_ih.dtype,
weight_qparams_dict)
ref_mod.weight_ih = mod.weight_ih
ref_mod.weight_hh = mod.weight_hh
ref_mod.bias_ih = mod.bias_ih
ref_mod.bias_hh = mod.bias_hh
return ref_mod
class GRUCell(RNNCellBase):
"""
    We store weight_qparams for all the weights (weight_ih and weight_hh), so we
    need to pass in a `weight_qparams_dict` that maps from a weight name,
    e.g. weight_ih, to the weight_qparams for that weight.
"""
def __init__(self, input_size: int, hidden_size: int, bias: bool = True,
device=None, dtype=None, weight_qparams_dict: Optional[Dict[str, Dict[str, Any]]] = None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype, 'weight_qparams_dict': weight_qparams_dict}
super().__init__(input_size, hidden_size, bias, num_chunks=3, **factory_kwargs)
def _get_name(self):
return "QuantizedGRUCell(Reference)"
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
assert input.dim() in (1, 2), \
f"GRUCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
is_batched = input.dim() == 2
if not is_batched:
input = input.unsqueeze(0)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
else:
hx = hx.unsqueeze(0) if not is_batched else hx
ret = _VF.gru_cell(
input, hx,
self.get_weight_ih(), self.get_weight_hh(),
self.bias_ih, self.bias_hh,
)
if not is_batched:
ret = ret.squeeze(0)
return ret
@classmethod
def from_float(cls, mod, weight_qparams_dict):
ref_mod = cls(
mod.input_size,
mod.hidden_size,
mod.bias,
mod.weight_ih.device,
mod.weight_ih.dtype,
weight_qparams_dict)
ref_mod.weight_ih = mod.weight_ih
ref_mod.weight_hh = mod.weight_hh
ref_mod.bias_ih = mod.bias_ih
ref_mod.bias_hh = mod.bias_hh
return ref_mod
class RNNBase(nn.RNNBase):
def __init__(self, mode: str, input_size: int, hidden_size: int,
num_layers: int = 1, bias: bool = True, batch_first: bool = False,
dropout: float = 0., bidirectional: bool = False, proj_size: int = 0,
device=None, dtype=None,
weight_qparams_dict: Optional[Dict[str, Dict[str, Any]]] = None) -> None:
super().__init__(
mode, input_size, hidden_size, num_layers, bias, batch_first, dropout,
bidirectional, proj_size, device, dtype
)
if weight_qparams_dict is None:
weight_qparams = {
'qscheme': torch.per_tensor_affine,
'dtype': torch.quint8,
'scale': 1.0,
'zero_point': 0
}
weight_qparams_dict = dict()
for wn in self._flat_weights_names:
if wn.startswith("weight"):
weight_qparams_dict[wn] = weight_qparams
self._init_weight_qparams_dict(weight_qparams_dict, device)
def _init_weight_qparams_dict(self, weight_qparams_dict, device):
for key, weight_qparams in weight_qparams_dict.items():
weight_qscheme = weight_qparams["qscheme"]
weight_dtype = weight_qparams["dtype"]
setattr(self, key + "_qscheme", weight_qscheme)
setattr(self, key + "_dtype", weight_dtype)
            assert weight_qscheme in [None, torch.per_tensor_affine, torch.per_channel_affine], \
                Exception(f"qscheme: {weight_qscheme} is not supported in {self._get_name()}")
if weight_qscheme is not None:
self.register_buffer(
key + "_scale",
torch.tensor(weight_qparams["scale"], dtype=torch.float, device=device))
self.register_buffer(
key + "_zero_point",
torch.tensor(weight_qparams["zero_point"], dtype=torch.int, device=device))
if weight_qscheme == torch.per_channel_affine:
self.register_buffer(
key + "_axis",
torch.tensor(weight_qparams["axis"], dtype=torch.int, device=device))
else:
# added for TorchScriptability, not used
self.register_buffer(
key + "_axis", torch.tensor(0, dtype=torch.int, device=device))
class LSTM(RNNBase):
""" Reference Quantized LSTM Module
    We store weight_qparams for all the weights in _flat_weights, so we need to pass in
    a `weight_qparams_dict` that maps from a weight name, e.g. weight_ih_l0,
    to the weight_qparams for that weight.
"""
def __init__(self, *args, **kwargs):
super().__init__('LSTM', *args, **kwargs)
# Same as above, see torch/nn/modules/module.py::_forward_unimplemented
def permute_hidden(self, # type: ignore[override]
hx: Tuple[Tensor, Tensor],
permutation: Optional[Tensor]
) -> Tuple[Tensor, Tensor]:
if permutation is None:
return hx
return apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation)
def get_expected_cell_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
if batch_sizes is not None:
mini_batch = int(batch_sizes[0])
else:
mini_batch = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.hidden_size)
return expected_hidden_size
# In the future, we should prevent mypy from applying contravariance rules here.
# See torch/nn/modules/module.py::_forward_unimplemented
def check_forward_args(self, # type: ignore[override]
input: Tensor,
hidden: Tuple[Tensor, Tensor],
batch_sizes: Optional[Tensor],
):
self.check_input(input, batch_sizes)
self.check_hidden_size(hidden[0], self.get_expected_hidden_size(input, batch_sizes),
'Expected hidden[0] size {}, got {}')
self.check_hidden_size(hidden[1], self.get_expected_cell_size(input, batch_sizes),
'Expected hidden[1] size {}, got {}')
def get_quantized_weight_bias_dict(self):
""" dictionary from flat_weight_name to quantized weight or (unquantized) bias
e.g.
{
"weight_ih_l0": quantized_weight,
"bias_ih_l0": unquantized_bias,
...
}
"""
quantized_weight_bias_dict = {}
for wn in self._flat_weights_names:
if hasattr(self, wn):
if wn.startswith("weight"):
weight_or_bias = get_quantized_weight(self, wn)
else:
weight_or_bias = getattr(self, wn)
else:
weight_or_bias = None
quantized_weight_bias_dict[wn] = weight_or_bias
return quantized_weight_bias_dict
def get_flat_weights(self):
flat_weights = []
for wn in self._flat_weights_names:
if hasattr(self, wn):
weight = getattr(self, wn)
if wn.startswith("weight"):
params = get_weight_and_quantization_params(self, wn)
weight = _quantize_and_dequantize_weight(*params)
else:
weight = None
flat_weights.append(weight)
return flat_weights
def forward(self, input, hx=None): # noqa: F811
orig_input = input
# xxx: isinstance check needs to be in conditional for TorchScript to compile
batch_sizes = None
if isinstance(orig_input, PackedSequence):
input, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
else:
batch_sizes = None
is_batched = input.dim() == 3
batch_dim = 0 if self.batch_first else 1
if not is_batched:
input = input.unsqueeze(batch_dim)
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
if hx is None:
num_directions = 2 if self.bidirectional else 1
real_hidden_size = self.proj_size if self.proj_size > 0 else self.hidden_size
h_zeros = torch.zeros(self.num_layers * num_directions,
max_batch_size, real_hidden_size,
dtype=input.dtype, device=input.device)
c_zeros = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
hx = (h_zeros, c_zeros)
else:
if batch_sizes is None: # If not PackedSequence input.
if is_batched:
if (hx[0].dim() != 3 or hx[1].dim() != 3):
msg = ("For batched 3-D input, hx and cx should "
f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors")
raise RuntimeError(msg)
else:
if hx[0].dim() != 2 or hx[1].dim() != 2:
msg = ("For unbatched 2-D input, hx and cx should "
f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors")
raise RuntimeError(msg)
hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
if batch_sizes is None:
result = _VF.lstm(input, hx, self.get_flat_weights(), self.bias, self.num_layers,
self.dropout, self.training, self.bidirectional, self.batch_first)
else:
result = _VF.lstm(input, batch_sizes, hx, self.get_flat_weights(), self.bias,
self.num_layers, self.dropout, self.training, self.bidirectional)
output = result[0]
hidden = result[1:]
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if isinstance(orig_input, PackedSequence):
output_packed = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
return output_packed, self.permute_hidden(hidden, unsorted_indices)
else:
if not is_batched:
output = output.squeeze(batch_dim)
hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1))
return output, self.permute_hidden(hidden, unsorted_indices)
def _get_name(self):
return "QuantizedLSTM(Reference)"
@classmethod
def from_float(cls, mod, weight_qparams_dict):
ref_mod = cls(
mod.input_size,
mod.hidden_size,
mod.num_layers,
mod.bias,
mod.batch_first,
mod.dropout,
mod.bidirectional,
weight_qparams_dict=weight_qparams_dict)
for wn in mod._flat_weights_names:
setattr(ref_mod, wn, getattr(mod, wn))
return ref_mod
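# Hedged illustration (not part of the original file): converting a single-layer float
# nn.LSTM into the reference LSTM above. The qparams dict is keyed by the flat weight
# names (e.g. "weight_ih_l0"), as the class docstring notes; values are placeholders.
def _example_reference_lstm_sketch():
    float_lstm = nn.LSTM(10, 20, num_layers=1)
    per_tensor_qparams = {
        "qscheme": torch.per_tensor_affine,
        "dtype": torch.qint8,
        "scale": 1.0,
        "zero_point": 0,
    }
    weight_qparams_dict = {
        wn: per_tensor_qparams
        for wn in float_lstm._flat_weights_names
        if wn.startswith("weight")
    }
    ref_lstm = LSTM.from_float(float_lstm, weight_qparams_dict)
    # the quantized weights / float biases can then be consumed by a lowering backend
    return ref_lstm.get_quantized_weight_bias_dict()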
|
pytorch-master
|
torch/nn/quantized/_reference/modules/rnn.py
|
from .modules import * # noqa: F403
|
pytorch-master
|
torch/nn/quantized/dynamic/__init__.py
|
import torch
import torch.nn.quantized as nnq
import torch.nn.intrinsic as nni
from torch.nn.quantized.modules.utils import _quantize_weight
class Linear(nnq.Linear):
r"""
    A dynamic quantized linear module with floating point tensors as inputs and outputs.
    We adopt the same interface as `torch.nn.Linear`; please see
    https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.
Similar to :class:`torch.nn.Linear`, attributes will be randomly
initialized at module creation time and will be overwritten later
Attributes:
weight (Tensor): the non-learnable quantized weights of the module which are of
shape :math:`(\text{out\_features}, \text{in\_features})`.
bias (Tensor): the non-learnable floating point bias of the module of shape
:math:`(\text{out\_features})`. If :attr:`bias` is ``True``,
the values are initialized to zero.
Examples::
>>> m = nn.quantized.dynamic.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> # xdoctest: +SKIP
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
# version used in this class is different from the parent class nnq.Linear
_version = 4
def __init__(self, in_features, out_features, bias_=True, dtype=torch.qint8):
super(Linear, self).__init__(in_features, out_features, bias_, dtype=dtype)
# We don't muck around with buffers or attributes or anything here
# to keep the module simple. *everything* is simply a Python attribute.
# Serialization logic is explicitly handled in the below serialization and
# deserialization modules
self.version = 4
def forward(self, x):
        # Note that we can handle the case where self.bias is None.
if self._packed_params.dtype == torch.qint8:
if self.version is None or self.version < 4:
Y = torch.ops.quantized.linear_dynamic(
x, self._packed_params._packed_params)
else:
Y = torch.ops.quantized.linear_dynamic(
x, self._packed_params._packed_params, reduce_range=True)
elif self._packed_params.dtype == torch.float16:
Y = torch.ops.quantized.linear_dynamic_fp16(
x, self._packed_params._packed_params)
else:
raise RuntimeError('Unsupported dtype on dynamic quantized linear!')
return Y.to(x.dtype)
def _get_name(self):
return 'DynamicQuantizedLinear'
def extra_repr(self):
extra_repr_str = 'in_features={}, out_features={}, dtype={}'.format(
self.in_features, self.out_features, self._packed_params.dtype
)
if self._packed_params.dtype == torch.qint8:
extra_repr_str += ', qscheme={}'.format(self.weight().qscheme())
return extra_repr_str
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get('version', None)
self.version = version
super(Linear, self)._load_from_state_dict(state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
@classmethod
def from_float(cls, mod):
r"""Create a dynamic quantized module from a float module or qparams_dict
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by the user
"""
float_modules = [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear,
torch.nn.intrinsic.modules.fused.LinearReLU, torch.nn.qat.dynamic.Linear]
assert type(mod) in float_modules, \
            'nn.quantized.dynamic.Linear.from_float only works for one of ' + \
str([float_mod.__name__ for float_mod in float_modules])
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
if type(mod) == nni.LinearReLU:
mod = mod[0]
if mod.qconfig is not None and mod.qconfig.weight is not None:
weight_observer = mod.qconfig.weight()
else:
# We have the circular import issues if we import the qconfig in the beginning of this file:
# https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
# import until we need it.
from torch.ao.quantization.qconfig import default_dynamic_qconfig
weight_observer = default_dynamic_qconfig.weight()
dtype = weight_observer.dtype
        assert dtype in [torch.qint8, torch.float16], "The only supported dtypes for " \
            "dynamic quantized linear are qint8 and float16, got: {}".format(dtype)
weight_observer(mod.weight)
if dtype == torch.qint8:
qweight = _quantize_weight(mod.weight.float(), weight_observer)
elif dtype == torch.float16:
qweight = mod.weight.float()
else:
raise RuntimeError('Unsupported dtype specified for dynamic quantized Linear!')
qlinear = cls(mod.in_features, mod.out_features, dtype=dtype)
qlinear.set_weight_bias(qweight, mod.bias)
return qlinear
@classmethod
def from_reference(cls, ref_qlinear):
""" Create a (fbgemm/qnnpack) dynamic quantized module from a reference quantized
module
Args:
ref_qlinear (Module): a reference quantized module, either produced by
torch.ao.quantization functions or provided by the user
"""
qlinear = cls(ref_qlinear.in_features, ref_qlinear.out_features, dtype=ref_qlinear.weight_dtype)
qweight = ref_qlinear.get_quantized_weight()
bias = ref_qlinear.bias
qlinear.set_weight_bias(qweight, bias)
return qlinear
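# Hedged illustration (not part of the original file): the usual way this module gets
# created is through dynamic quantization of a float model, sketched here with
# torch.ao.quantization.quantize_dynamic.
def _example_dynamic_linear_sketch():
    from torch.ao.quantization import quantize_dynamic
    float_model = torch.nn.Sequential(torch.nn.Linear(20, 30))
    # swaps nn.Linear for the dynamic quantized Linear defined above
    quantized_model = quantize_dynamic(float_model, {torch.nn.Linear}, dtype=torch.qint8)
    x = torch.randn(4, 20)
    return quantized_model(x)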
|
pytorch-master
|
torch/nn/quantized/dynamic/modules/linear.py
|
from .linear import Linear
from .rnn import LSTM, GRU, LSTMCell, RNNCell, GRUCell
from .conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
__all__ = [
'Linear',
'LSTM',
'GRU',
'LSTMCell',
'RNNCell',
'GRUCell',
'Conv1d',
'Conv2d',
'Conv3d',
'ConvTranspose1d',
'ConvTranspose2d',
'ConvTranspose3d',
]
|
pytorch-master
|
torch/nn/quantized/dynamic/modules/__init__.py
|
# coding=utf-8
r"""Dynamically quantized convolution modules."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch._ops import ops
from torch.nn.common_types import _size_1_t
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.quantized.modules.conv import _reverse_repeat_padding
import torch.nn.quantized.modules as nnq
import warnings
__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d']
class Conv1d(nnq.Conv1d):
r"""A dynamically quantized conv module with floating point tensors as inputs and outputs.
For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv1d` and :class:`~torch.nn.quantized.dynamic.Conv1d`.
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.Conv1d` for other attributes.
Examples::
>>> m = nn.quantized.dynamic.Conv1d(16, 33, 3, stride=2)
>>> input = torch.randn(20, 16, 100)
>>> # xdoctest: +SKIP
>>> output = m(input)
"""
_FLOAT_MODULE = nn.Conv1d
_NNIQAT_CONV_BN_MODULE = None # type: ignore[assignment]
_NNI_CONV_RELU_MODULE = None # type: ignore[assignment]
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
device=None,
dtype=None,
reduce_range=True):
warnings.warn(
"The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format(
self._get_name()
)
)
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = padding if isinstance(padding, str) else _single(padding)
dilation = _single(dilation)
super(Conv1d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
groups, bias, padding_mode, **factory_kwargs)
def _get_name(self):
return 'DynamicQuantizedConv1d'
def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 3:
raise ValueError("Input shape must be `(N, C, L)`!")
if self.padding_mode != 'zeros':
# Padding in Conv1d is stored as (p, p), need to get (p,)
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
input = F.pad(input, _reversed_padding_repeated_twice,
mode=self.padding_mode)
return ops.quantized.conv1d_dynamic(input, self._packed_params, reduce_range)
class Conv2d(nnq.Conv2d):
r"""A dynamically quantized conv module with floating point tensors as inputs and outputs.
For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv2d` and :class:`~torch.nn.quantized.dynamic.Conv2d`.
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.Conv2d` for other attributes.
Examples::
>>> # With square kernels and equal stride
>>> m = nn.quantized.dynamic.Conv2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.quantized.dynamic.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> # non-square kernels and unequal stride and with padding and dilation
>>> m = nn.quantized.dynamic.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
>>> input = torch.randn(20, 16, 50, 100)
>>> # xdoctest: +SKIP
>>> output = m(input)
"""
_FLOAT_MODULE = nn.Conv2d
_NNIQAT_CONV_BN_MODULE = None # type: ignore[assignment]
_NNI_CONV_RELU_MODULE = None # type: ignore[assignment]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros', device=None, dtype=None):
warnings.warn(
"The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format(
self._get_name()
)
)
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
groups, bias, padding_mode, **factory_kwargs)
def _get_name(self):
return 'DynamicQuantizedConv2d'
def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
if self.padding_mode != 'zeros':
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
input = F.pad(input, _reversed_padding_repeated_twice,
mode=self.padding_mode)
return ops.quantized.conv2d_dynamic(
input, self._packed_params, reduce_range)
class Conv3d(nnq.Conv3d):
r"""A dynamically quantized conv module with floating point tensors as inputs and outputs.
For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv3d` and :class:`~torch.nn.quantized.dynamic.Conv3d`.
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.Conv3d` for other attributes.
Examples::
>>> # With square kernels and equal stride
>>> m = nn.quantized.dynamic.Conv3d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.quantized.dynamic.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2))
>>> # non-square kernels and unequal stride and with padding and dilation
>>> m = nn.quantized.dynamic.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), dilation=(1, 2, 2))
>>> input = torch.randn(20, 16, 56, 56, 56)
>>> # xdoctest: +SKIP
>>> output = m(input)
"""
_FLOAT_MODULE = nn.Conv3d
_NNIQAT_CONV_BN_MODULE = None # type: ignore[assignment]
_NNI_CONV_RELU_MODULE = None # type: ignore[assignment]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros', device=None, dtype=None):
warnings.warn(
"The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format(
self._get_name()
)
)
assert padding_mode != 'reflect', "Conv3d does not support reflection padding"
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
super(Conv3d, self)._init(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _triple(0), groups, bias, padding_mode, **factory_kwargs)
def _get_name(self):
return 'DynamicQuantizedConv3d'
def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
if self.padding_mode != 'zeros':
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
input = F.pad(input, _reversed_padding_repeated_twice,
mode=self.padding_mode)
return ops.quantized.conv3d_dynamic(
input, self._packed_params, reduce_range)
class ConvTranspose1d(nnq.ConvTranspose1d):
r"""A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.ConvTranspose1d`.
    For special notes, please see :class:`~torch.nn.quantized.dynamic.Conv1d`
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.ConvTranspose1d` for other attributes.
Examples::
>>> # With square kernels and equal stride
>>> # xdoctest: +SKIP
>>> m = nndq.ConvTranspose1d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nndq.ConvTranspose1d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> downsample = nndq.Conv1d(16, 16, 3, stride=2, padding=1)
>>> upsample = nndq.ConvTranspose1d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12])
"""
_FLOAT_MODULE = nn.ConvTranspose1d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, bias=True,
dilation=1, padding_mode='zeros', device=None, dtype=None):
warnings.warn(
"The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format(
self._get_name()
)
)
factory_kwargs = {'device': device, 'dtype': dtype}
super(ConvTranspose1d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, output_padding,
groups, bias, dilation, padding_mode, **factory_kwargs)
def _get_name(self):
        return 'DynamicQuantizedConvTranspose1d'
def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 3:
raise ValueError("Input shape must be `(N, C, L)`!")
return torch.ops.quantized.conv_transpose1d_dynamic(
input, self._packed_params, reduce_range)
class ConvTranspose2d(nnq.ConvTranspose2d):
r"""A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.ConvTranspose2d`.
    For special notes, please see :class:`~torch.nn.quantized.dynamic.Conv2d`
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.ConvTranspose2d` for other attributes.
Examples::
>>> # With square kernels and equal stride
>>> m = nnq.ConvTranspose2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nnq.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> # xdoctest: +SKIP
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> downsample = nnq.Conv2d(16, 16, 3, stride=2, padding=1)
>>> upsample = nnq.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
"""
_FLOAT_MODULE = nn.ConvTranspose2d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, bias=True,
dilation=1, padding_mode='zeros', device=None, dtype=None):
warnings.warn(
"The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format(
self._get_name()
)
)
factory_kwargs = {'device': device, 'dtype': dtype}
super(ConvTranspose2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, output_padding,
groups, bias, dilation, padding_mode, **factory_kwargs)
def _get_name(self):
        return 'DynamicQuantizedConvTranspose2d'
def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
return ops.quantized.conv_transpose2d_dynamic(
input, self._packed_params, reduce_range)
class ConvTranspose3d(nnq.ConvTranspose3d):
r"""A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.ConvTranspose3d`.
    For special notes, please see :class:`~torch.nn.quantized.dynamic.Conv3d`
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.ConvTranspose3d` for other attributes.
Examples::
>>> # With cubic kernels and equal stride
>>> m = nnq.ConvTranspose3d(16, 33, 3, stride=2)
>>> # non-cubic kernels and unequal stride and with padding
>>> m = nnq.ConvTranspose3d(16, 33, (3, 3, 5), stride=(2, 1, 1), padding=(4, 2, 2))
>>> # xdoctest: +SKIP
>>> output = m(input)
>>> # exact output size can be also specified as an argument
>>> downsample = nnq.Conv3d(16, 16, 3, stride=2, padding=1)
>>> upsample = nnq.ConvTranspose3d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(input)
>>> h.size()
torch.Size([1, 16, 6, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12, 12])
"""
_FLOAT_MODULE = nn.ConvTranspose3d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, bias=True,
dilation=1, padding_mode='zeros', device=None, dtype=None):
warnings.warn(
"The current implementation of the {} module has poor numerical accuracy and its use is not recommended".format(
self._get_name()
)
)
factory_kwargs = {'device': device, 'dtype': dtype}
super(ConvTranspose3d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, output_padding,
groups, bias, dilation, padding_mode, **factory_kwargs)
def _get_name(self):
        return 'DynamicQuantizedConvTranspose3d'
def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, T, H, W)`!")
return ops.quantized.conv_transpose3d_dynamic(
input, self._packed_params, reduce_range)
|
pytorch-master
|
torch/nn/quantized/dynamic/modules/conv.py
|
import numbers
import warnings
import torch
import torch.nn as nn
from torch import Tensor # noqa: F401
from torch._jit_internal import Tuple, Optional, List, Union, Dict # noqa: F401
from torch.nn.utils.rnn import PackedSequence
from torch.nn.quantized.modules.utils import _quantize_weight
__all__ = ['pack_weight_bias', 'PackedParameter', 'RNNBase', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell', 'LSTMCell',
'GRUCell']
def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
return tensor.index_select(dim, permutation)
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
warnings.warn("apply_permutation is deprecated, please use tensor.index_select(dim, permutation) instead")
return _apply_permutation(tensor, permutation, dim)
def pack_weight_bias(qweight, bias, dtype):
if dtype == torch.qint8:
# for each layer, for each direction we need to quantize and pack
# weights and pack parameters in this order:
#
# w_ih, w_hh
packed_weight = \
torch.ops.quantized.linear_prepack(qweight, bias)
return packed_weight
else:
# for each layer, for each direction we need to quantize and pack
# weights and pack parameters in this order:
#
# packed_ih, packed_hh, b_ih, b_hh
packed_weight = torch.ops.quantized.linear_prepack_fp16(
qweight, bias)
return packed_weight
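# Hedged illustration (not part of the original file): exercising the qint8 branch of
# pack_weight_bias above. The gate_size/input_size shapes and the scale are placeholders,
# and a quantized engine (fbgemm/qnnpack) must be available for the prepack op.
def _example_pack_weight_bias_sketch():
    gate_size, input_size = 4 * 20, 10  # e.g. one direction of a single-layer LSTM
    w = torch.randn(gate_size, input_size)
    b = torch.zeros(gate_size)
    qweight = torch.quantize_per_tensor(w, scale=0.1, zero_point=0, dtype=torch.qint8)
    return pack_weight_bias(qweight, b, torch.qint8)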
class PackedParameter(torch.nn.Module):
def __init__(self, param):
super(PackedParameter, self).__init__()
self.param = param
def _save_to_state_dict(self, destination, prefix, keep_vars):
super(PackedParameter, self)._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'param'] = self.param
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
self.param = state_dict[prefix + 'param']
super(PackedParameter, self)._load_from_state_dict(state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
class RNNBase(torch.nn.Module):
_FLOAT_MODULE = nn.RNNBase
_version = 2
def __init__(self, mode, input_size, hidden_size,
num_layers=1, bias=True, batch_first=False,
dropout=0., bidirectional=False, dtype=torch.qint8):
super(RNNBase, self).__init__()
self.mode = mode
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = float(dropout)
self.bidirectional = bidirectional
self.dtype = dtype
self.version = 2
self.training = False
num_directions = 2 if bidirectional else 1
# "type: ignore" is required since ints and Numbers are not fully comparable
# https://github.com/python/mypy/issues/8566
if not isinstance(dropout, numbers.Number) \
or not 0 <= dropout <= 1 or isinstance(dropout, bool): # type: ignore[operator]
raise ValueError("dropout should be a number in range [0, 1] "
"representing the probability of an element being "
"zeroed")
if dropout > 0 and num_layers == 1: # type: ignore[operator]
warnings.warn("dropout option adds dropout after all but last "
"recurrent layer, so non-zero dropout expects "
"num_layers greater than 1, but got dropout={} and "
"num_layers={}".format(dropout, num_layers))
if mode == 'LSTM':
gate_size = 4 * hidden_size
elif mode == 'GRU':
gate_size = 3 * hidden_size
else:
raise ValueError("Unrecognized RNN mode: " + mode)
_all_weight_values = []
for layer in range(num_layers):
for direction in range(num_directions):
layer_input_size = input_size if layer == 0 else hidden_size * num_directions
w_ih = torch.randn(gate_size, layer_input_size).to(torch.float)
w_hh = torch.randn(gate_size, hidden_size).to(torch.float)
b_ih = torch.randn(gate_size).to(torch.float)
b_hh = torch.randn(gate_size).to(torch.float)
if dtype == torch.qint8:
w_ih = torch.quantize_per_tensor(w_ih, scale=0.1, zero_point=0, dtype=torch.qint8)
w_hh = torch.quantize_per_tensor(w_hh, scale=0.1, zero_point=0, dtype=torch.qint8)
packed_ih = \
torch.ops.quantized.linear_prepack(w_ih, b_ih)
packed_hh = \
torch.ops.quantized.linear_prepack(w_hh, b_hh)
if self.version is None or self.version < 2:
cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
packed_ih, packed_hh, b_ih, b_hh)
else:
cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
packed_ih, packed_hh, b_ih, b_hh, True)
else:
packed_ih = torch.ops.quantized.linear_prepack_fp16(w_ih, b_ih)
packed_hh = torch.ops.quantized.linear_prepack_fp16(w_hh, b_hh)
cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(
packed_ih, packed_hh)
_all_weight_values.append(PackedParameter(cell_params))
self._all_weight_values = torch.nn.ModuleList(_all_weight_values)
def _get_name(self):
return 'DynamicQuantizedRNN'
def extra_repr(self):
s = '{input_size}, {hidden_size}'
if self.num_layers != 1:
s += ', num_layers={num_layers}'
if self.bias is not True:
s += ', bias={bias}'
if self.batch_first is not False:
s += ', batch_first={batch_first}'
if self.dropout != 0:
s += ', dropout={dropout}'
if self.bidirectional is not False:
s += ', bidirectional={bidirectional}'
return s.format(**self.__dict__)
def __repr__(self):
# We don't want to show `ModuleList` children, hence custom
# `__repr__`. This is the same as nn.Module.__repr__, except the check
# for the `PackedParameter` and `nn.ModuleList`.
# You should still override `extra_repr` to add more info.
extra_lines = []
extra_repr = self.extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split('\n')
child_lines = []
for key, module in self._modules.items():
if isinstance(module, (PackedParameter, nn.ModuleList)):
continue
mod_str = repr(module)
mod_str = nn.modules.module._addindent(mod_str, 2)
child_lines.append('(' + key + '): ' + mod_str)
lines = extra_lines + child_lines
main_str = self._get_name() + '('
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += '\n ' + '\n '.join(lines) + '\n'
main_str += ')'
return main_str
def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
expected_input_dim = 2 if batch_sizes is not None else 3
if input.dim() != expected_input_dim:
raise RuntimeError(
'input must have {} dimensions, got {}'.format(
expected_input_dim, input.dim()))
if self.input_size != input.size(-1):
raise RuntimeError(
'input.size(-1) must be equal to input_size. Expected {}, got {}'.format(
self.input_size, input.size(-1)))
def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
if batch_sizes is not None:
mini_batch = int(batch_sizes[0])
else:
mini_batch = input.size(0) if self.batch_first else input.size(1)
num_directions = 2 if self.bidirectional else 1
expected_hidden_size = (self.num_layers * num_directions,
mini_batch, self.hidden_size)
return expected_hidden_size
def check_hidden_size(
self, hx: Tensor, expected_hidden_size: Tuple[int, int, int],
msg: str = 'Expected hidden size {}, got {}'
) -> None:
if hx.size() != expected_hidden_size:
raise RuntimeError(msg.format(
expected_hidden_size, list(hx.size())))
def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None:
self.check_input(input, batch_sizes)
expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
self.check_hidden_size(hidden, expected_hidden_size,
msg='Expected hidden size {}, got {}')
def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]) -> Tensor:
if permutation is None:
return hx
return _apply_permutation(hx, permutation)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get('version', None)
self.version = version
super(RNNBase, self)._load_from_state_dict(state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
def set_weight_bias(self, weight_bias_dict):
def weight_bias_name(ihhh, layer, suffix):
weight_name = "weight_{}_l{}{}".format(ihhh, layer, suffix)
bias_name = "bias_{}_l{}{}".format(ihhh, layer, suffix)
return weight_name, bias_name
num_directions = 2 if self.bidirectional else 1
# TODO: dedup with __init__ of RNNBase
_all_weight_values = []
for layer in range(self.num_layers):
for direction in range(num_directions):
suffix = "_reverse" if direction == 1 else ""
w_ih_name, b_ih_name = weight_bias_name("ih", layer, suffix)
w_hh_name, b_hh_name = weight_bias_name("hh", layer, suffix)
w_ih = weight_bias_dict[w_ih_name]
b_ih = weight_bias_dict[b_ih_name]
w_hh = weight_bias_dict[w_hh_name]
b_hh = weight_bias_dict[b_hh_name]
if w_ih.dtype == torch.qint8:
packed_ih = torch.ops.quantized.linear_prepack(w_ih, b_ih)
packed_hh = torch.ops.quantized.linear_prepack(w_hh, b_hh)
if self.version is None or self.version < 2:
cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
packed_ih, packed_hh, b_ih, b_hh)
else:
cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
packed_ih, packed_hh, b_ih, b_hh, True)
else:
packed_ih = torch.ops.quantized.linear_prepack_fp16(w_ih, b_ih)
packed_hh = torch.ops.quantized.linear_prepack_fp16(w_hh, b_hh)
cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(
packed_ih, packed_hh)
_all_weight_values.append(PackedParameter(cell_params))
self._all_weight_values = torch.nn.ModuleList(_all_weight_values)
@classmethod
def from_float(cls, mod):
assert type(mod) in set(
[torch.nn.LSTM,
torch.nn.GRU]
), 'nn.quantized.dynamic.RNNBase.from_float only works for nn.LSTM and nn.GRU'
assert hasattr(
mod,
'qconfig'
), 'Input float module must have qconfig defined'
if mod.qconfig is not None and mod.qconfig.weight is not None:
weight_observer_method = mod.qconfig.weight
else:
# We have the circular import issues if we import the qconfig in the beginning of this file:
# https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
# import until we need it.
from torch.ao.quantization.qconfig import default_dynamic_qconfig
weight_observer_method = default_dynamic_qconfig.weight
dtype = weight_observer_method().dtype
supported_scalar_types = [torch.qint8, torch.float16]
if dtype not in supported_scalar_types:
raise RuntimeError('Unsupported dtype for dynamic RNN quantization: {}'.format(dtype))
# RNNBase can be either LSTM or GRU
qRNNBase: Union[LSTM, GRU]
if mod.mode == 'LSTM':
qRNNBase = LSTM(mod.input_size, mod.hidden_size, mod.num_layers,
mod.bias, mod.batch_first, mod.dropout, mod.bidirectional, dtype)
elif mod.mode == 'GRU':
qRNNBase = GRU(mod.input_size, mod.hidden_size, mod.num_layers,
mod.bias, mod.batch_first, mod.dropout, mod.bidirectional, dtype)
else:
raise NotImplementedError('Only LSTM/GRU is supported for QuantizedRNN for now')
num_directions = 2 if mod.bidirectional else 1
assert mod.bias
_all_weight_values = []
for layer in range(qRNNBase.num_layers):
for direction in range(num_directions):
suffix = '_reverse' if direction == 1 else ''
def retrieve_weight_bias(ihhh):
weight_name = 'weight_{}_l{}{}'.format(ihhh, layer, suffix)
bias_name = 'bias_{}_l{}{}'.format(ihhh, layer, suffix)
weight = getattr(mod, weight_name)
bias = getattr(mod, bias_name)
return weight, bias
weight_ih, bias_ih = retrieve_weight_bias('ih')
weight_hh, bias_hh = retrieve_weight_bias('hh')
if dtype == torch.qint8:
def quantize_and_pack(w, b):
weight_observer = weight_observer_method()
weight_observer(w)
qweight = _quantize_weight(w.float(), weight_observer)
packed_weight = \
torch.ops.quantized.linear_prepack(qweight, b)
return packed_weight
packed_ih = quantize_and_pack(weight_ih, bias_ih)
packed_hh = quantize_and_pack(weight_hh, bias_hh)
if qRNNBase.version is None or qRNNBase.version < 2:
cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
packed_ih, packed_hh, bias_ih, bias_hh)
else:
cell_params = torch.ops.quantized.make_quantized_cell_params_dynamic(
packed_ih, packed_hh, bias_ih, bias_hh, True)
elif dtype == torch.float16:
packed_ih = torch.ops.quantized.linear_prepack_fp16(
weight_ih.float(), bias_ih)
packed_hh = torch.ops.quantized.linear_prepack_fp16(
weight_hh.float(), bias_hh)
cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(
packed_ih, packed_hh)
else:
raise RuntimeError('Unsupported dtype specified for dynamic quantized LSTM!')
_all_weight_values.append(PackedParameter(cell_params))
qRNNBase._all_weight_values = torch.nn.ModuleList(_all_weight_values)
return qRNNBase
def _weight_bias(self):
# Returns a dict of weights and biases
weight_bias_dict: Dict[str, Dict] = {'weight' : {}, 'bias' : {}}
count = 0
num_directions = 2 if self.bidirectional else 1
for layer in range(self.num_layers):
for direction in range(num_directions):
suffix = '_reverse' if direction == 1 else ''
key_name1 = 'weight_ih_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
key_name2 = 'weight_hh_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
# packed weights are part of torchbind class, CellParamsSerializationType
# Within the packed weight class, the weight and bias are accessible as Tensors
packed_weight_bias = self._all_weight_values[count].param.__getstate__()[0][4]
weight_bias_dict['weight'][key_name1] = packed_weight_bias[0].__getstate__()[0][0]
weight_bias_dict['weight'][key_name2] = packed_weight_bias[1].__getstate__()[0][0]
key_name1 = 'bias_ih_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
key_name2 = 'bias_hh_l{layer_idx}{suffix}'.format(layer_idx=layer, suffix=suffix)
weight_bias_dict['bias'][key_name1] = packed_weight_bias[0].__getstate__()[0][1]
weight_bias_dict['bias'][key_name2] = packed_weight_bias[1].__getstate__()[0][1]
count = count + 1
return weight_bias_dict
def get_weight(self):
return self._weight_bias()['weight']
def get_bias(self):
return self._weight_bias()['bias']
class LSTM(RNNBase):
r"""
    A dynamic quantized LSTM module with floating point tensors as inputs and outputs.
    We adopt the same interface as `torch.nn.LSTM`; please see
https://pytorch.org/docs/stable/nn.html#torch.nn.LSTM for documentation.
Examples::
>>> rnn = nn.LSTM(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> c0 = torch.randn(2, 3, 20)
>>> output, (hn, cn) = rnn(input, (h0, c0))
"""
_FLOAT_MODULE = nn.LSTM
__overloads__ = {'forward': ['forward_packed', 'forward_tensor']}
def __init__(self, *args, **kwargs):
super(LSTM, self).__init__('LSTM', *args, **kwargs)
def _get_name(self):
return 'DynamicQuantizedLSTM'
def forward_impl(
self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]],
batch_sizes: Optional[Tensor], max_batch_size: int,
sorted_indices: Optional[Tensor]
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
if hx is None:
num_directions = 2 if self.bidirectional else 1
zeros = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
hx = (zeros, zeros)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes they are passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
_all_params = ([m.param for m in self._all_weight_values])
if batch_sizes is None:
result = torch.quantized_lstm(input, hx, _all_params, self.bias, self.num_layers,
float(self.dropout), self.training, self.bidirectional,
self.batch_first, dtype=self.dtype, use_dynamic=True)
else:
result = torch.quantized_lstm(input, batch_sizes, hx, _all_params, self.bias,
self.num_layers, float(self.dropout), self.training,
self.bidirectional, dtype=self.dtype, use_dynamic=True)
output = result[0]
hidden = result[1:]
return output, hidden
@torch.jit.export
def forward_tensor(
self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
batch_sizes = None
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
output, hidden = self.forward_impl(
input, hx, batch_sizes, max_batch_size, sorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
@torch.jit.export
def forward_packed(
self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None
) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]:
input_, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
output_, hidden = self.forward_impl(
input_, hx, batch_sizes, max_batch_size, sorted_indices)
output = PackedSequence(output_, batch_sizes,
sorted_indices, unsorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
# "type: ignore" is required due to issue #43072
def permute_hidden( # type: ignore[override]
self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor]
) -> Tuple[Tensor, Tensor]:
if permutation is None:
return hx
return _apply_permutation(hx[0], permutation), _apply_permutation(hx[1], permutation)
# "type: ignore" is required due to issue #43072
def check_forward_args( # type: ignore[override]
self, input: Tensor, hidden: Tuple[Tensor, Tensor], batch_sizes: Optional[Tensor]
) -> None:
self.check_input(input, batch_sizes)
expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
self.check_hidden_size(hidden[0], expected_hidden_size,
'Expected hidden[0] size {}, got {}')
self.check_hidden_size(hidden[1], expected_hidden_size,
'Expected hidden[1] size {}, got {}')
@torch.jit.ignore
def forward(self, input, hx=None):
if isinstance(input, PackedSequence):
return self.forward_packed(input, hx)
else:
return self.forward_tensor(input, hx)
@classmethod
def from_float(cls, mod):
return super(LSTM, cls).from_float(mod)
@classmethod
def from_reference(cls, ref_mod):
assert hasattr(ref_mod, "weight_ih_l0_dtype"), "We are assuming weight_ih_l0 "
"exists in LSTM, may need to relax the assumption to support the use case"
qmod = cls(
ref_mod.input_size,
ref_mod.hidden_size,
ref_mod.num_layers,
ref_mod.bias,
ref_mod.batch_first,
ref_mod.dropout,
ref_mod.bidirectional,
# assuming there is layer 0, which should be OK
ref_mod.weight_ih_l0_dtype,
)
qmod.set_weight_bias(ref_mod.get_quantized_weight_bias_dict())
return qmod
class GRU(RNNBase):
r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
\begin{array}{ll}
r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\
h_t = (1 - z_t) * n_t + z_t * h_{(t-1)}
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input
at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer
at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`,
:math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
(:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
variable which is :math:`0` with probability :attr:`dropout`.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two GRUs together to form a `stacked GRU`,
with the second GRU taking in outputs of the first GRU and
computing the final results. Default: 1
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
GRU layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False``
Inputs: input, h_0
- **input** of shape `(seq_len, batch, input_size)`: tensor containing the features
of the input sequence. The input can also be a packed variable length
sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence`
for details.
- **h_0** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the initial hidden state for each element in the batch.
Defaults to zero if not provided. If the RNN is bidirectional,
num_directions should be 2, else it should be 1.
Outputs: output, h_n
- **output** of shape `(seq_len, batch, num_directions * hidden_size)`: tensor
containing the output features h_t from the last layer of the GRU,
for each `t`. If a :class:`torch.nn.utils.rnn.PackedSequence` has been
given as the input, the output will also be a packed sequence.
For the unpacked case, the directions can be separated
using ``output.view(seq_len, batch, num_directions, hidden_size)``,
with forward and backward being direction `0` and `1` respectively.
Similarly, the directions can be separated in the packed case.
- **h_n** of shape `(num_layers * num_directions, batch, hidden_size)`: tensor
containing the hidden state for `t = seq_len`
Like *output*, the layers can be separated using
``h_n.view(num_layers, num_directions, batch, hidden_size)``.
Shape:
- Input1: :math:`(L, N, H_{in})` tensor containing input features where
:math:`H_{in}=\text{input\_size}` and `L` represents a sequence length.
- Input2: :math:`(S, N, H_{out})` tensor
containing the initial hidden state for each element in the batch.
:math:`H_{out}=\text{hidden\_size}`
Defaults to zero if not provided, where :math:`S=\text{num\_layers} * \text{num\_directions}`.
If the RNN is bidirectional, num_directions should be 2, else it should be 1.
- Output1: :math:`(L, N, H_{all})` where :math:`H_{all}=\text{num\_directions} * \text{hidden\_size}`
- Output2: :math:`(S, N, H_{out})` tensor containing the next hidden state
for each element in the batch
Attributes:
weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
(W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`.
Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)`
weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
(W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)`
bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
(b_ir|b_iz|b_in), of shape `(3*hidden_size)`
bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
(b_hr|b_hz|b_hn), of shape `(3*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. include:: ../cudnn_persistent_rnn.rst
Examples::
>>> rnn = nn.GRU(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> output, hn = rnn(input, h0)
"""
_FLOAT_MODULE = nn.GRU
__overloads__ = {'forward': ['forward_packed', 'forward_tensor']}
def __init__(self, *args, **kwargs):
super(GRU, self).__init__('GRU', *args, **kwargs)
def _get_name(self):
return 'DynamicQuantizedGRU'
def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None:
self.check_input(input, batch_sizes)
expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
self.check_hidden_size(hidden, expected_hidden_size,
'Expected hidden size {}, got {}')
def forward_impl(
self, input: Tensor, hx: Optional[Tensor],
batch_sizes: Optional[Tensor], max_batch_size: int,
sorted_indices: Optional[Tensor]
) -> Tuple[Tensor, Tensor]:
if hx is None:
num_directions = 2 if self.bidirectional else 1
zeros = torch.zeros(self.num_layers * num_directions,
max_batch_size, self.hidden_size,
dtype=input.dtype, device=input.device)
hx = zeros
else:
# Each batch of the hidden state should match the input sequence that
# the user believes they are passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
_all_params = ([m.param for m in self._all_weight_values])
if batch_sizes is None:
result = torch.quantized_gru(input,
hx,
_all_params,
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional,
self.batch_first)
else:
result = torch.quantized_gru(input,
batch_sizes,
hx,
_all_params,
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional)
output = result[0]
hidden = result[1]
return output, hidden
@torch.jit.export
def forward_tensor(
self, input: Tensor, hx: Optional[Tensor] = None
) -> Tuple[Tensor, Tensor]:
batch_sizes = None
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
output, hidden = self.forward_impl(
input, hx, batch_sizes, max_batch_size, sorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
@torch.jit.export
def forward_packed(
self, input: PackedSequence, hx: Optional[Tensor] = None
) -> Tuple[PackedSequence, Tensor]:
input_, batch_sizes, sorted_indices, unsorted_indices = input
max_batch_size = batch_sizes[0]
max_batch_size = int(max_batch_size)
output_, hidden = self.forward_impl(
input_, hx, batch_sizes, max_batch_size, sorted_indices)
output = PackedSequence(output_, batch_sizes,
sorted_indices, unsorted_indices)
return output, self.permute_hidden(hidden, unsorted_indices)
def permute_hidden(
self, hx: Tensor, permutation: Optional[Tensor]
) -> Tensor:
if permutation is None:
return hx
return _apply_permutation(hx, permutation)
@torch.jit.ignore
def forward(self, input, hx=None):
if isinstance(input, PackedSequence):
return self.forward_packed(input, hx)
else:
return self.forward_tensor(input, hx)
@classmethod
def from_float(cls, mod):
return super(GRU, cls).from_float(mod)
class RNNCellBase(torch.nn.Module):
# _FLOAT_MODULE = nn.CellRNNBase
__constants__ = ['input_size', 'hidden_size', 'bias']
def __init__(self, input_size, hidden_size, bias=True, num_chunks=4, dtype=torch.qint8):
super(RNNCellBase, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.weight_dtype = dtype
if bias:
self.bias_ih = torch.randn(num_chunks * hidden_size).to(dtype=torch.float)
self.bias_hh = torch.randn(num_chunks * hidden_size).to(dtype=torch.float)
else:
self.register_parameter('bias_ih', None)
self.register_parameter('bias_hh', None)
weight_ih = torch.randn(num_chunks * hidden_size, input_size).to(torch.float)
weight_hh = torch.randn(num_chunks * hidden_size, hidden_size).to(torch.float)
if dtype == torch.qint8:
weight_ih = torch.quantize_per_tensor(weight_ih, scale=1, zero_point=0, dtype=torch.qint8)
weight_hh = torch.quantize_per_tensor(weight_hh, scale=1, zero_point=0, dtype=torch.qint8)
if dtype == torch.qint8:
# for each layer, for each direction we need to quantize and pack
# weights and pack parameters in this order:
#
# w_ih, w_hh
packed_weight_ih = \
torch.ops.quantized.linear_prepack(weight_ih, self.bias_ih)
packed_weight_hh = \
torch.ops.quantized.linear_prepack(weight_hh, self.bias_hh)
else:
# for each layer, for each direction we need to quantize and pack
# weights and pack parameters in this order:
#
# packed_ih, packed_hh, b_ih, b_hh
packed_weight_ih = torch.ops.quantized.linear_prepack_fp16(
weight_ih, self.bias_ih)
packed_weight_hh = torch.ops.quantized.linear_prepack_fp16(
weight_hh, self.bias_hh)
self._packed_weight_ih = packed_weight_ih
self._packed_weight_hh = packed_weight_hh
def _get_name(self):
return 'DynamicQuantizedRNNBase'
def extra_repr(self):
s = '{input_size}, {hidden_size}'
if 'bias' in self.__dict__ and self.bias is not True:
s += ', bias={bias}'
if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh":
s += ', nonlinearity={nonlinearity}'
return s.format(**self.__dict__)
def check_forward_input(self, input):
if input.size(1) != self.input_size:
raise RuntimeError(
"input has inconsistent input_size: got {}, expected {}".format(
input.size(1), self.input_size))
def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = '') -> None:
if input.size(0) != hx.size(0):
raise RuntimeError(
"Input batch size {} doesn't match hidden{} batch size {}".format(
input.size(0), hidden_label, hx.size(0)))
if hx.size(1) != self.hidden_size:
raise RuntimeError(
"hidden{} has inconsistent hidden_size: got {}, expected {}".format(
hidden_label, hx.size(1), self.hidden_size))
@classmethod
def from_float(cls, mod):
assert type(mod) in set([torch.nn.LSTMCell,
                         torch.nn.GRUCell,
                         torch.nn.RNNCell]), (
    'nn.quantized.dynamic.RNNCellBase.from_float only works for '
    'nn.LSTMCell, nn.GRUCell and nn.RNNCell')
assert hasattr(
mod, 'qconfig'), 'Input float module must have qconfig defined'
if mod.qconfig is not None and mod.qconfig.weight is not None:
weight_observer_method = mod.qconfig.weight
else:
# We have the circular import issues if we import the qconfig in the beginning of this file:
# https://github.com/pytorch/pytorch/pull/24231. The current workaround is to postpone the
# import until we need it.
from torch.ao.quantization.qconfig import default_dynamic_qconfig
weight_observer_method = default_dynamic_qconfig.weight
dtype = weight_observer_method().dtype
supported_scalar_types = [torch.qint8, torch.float16]
if dtype not in supported_scalar_types:
raise RuntimeError('Unsupported dtype for dynamic RNN quantization: {}'.format(dtype))
qRNNCellBase: Union[LSTMCell, GRUCell, RNNCell]
if type(mod) == torch.nn.LSTMCell:
qRNNCellBase = LSTMCell(mod.input_size, mod.hidden_size, bias=mod.bias, dtype=dtype)
elif type(mod) == torch.nn.GRUCell:
qRNNCellBase = GRUCell(mod.input_size, mod.hidden_size, bias=mod.bias, dtype=dtype)
elif type(mod) == torch.nn.RNNCell:
qRNNCellBase = RNNCell(mod.input_size, mod.hidden_size, bias=mod.bias, nonlinearity=mod.nonlinearity, dtype=dtype)
else:
raise NotImplementedError('Only LSTMCell, GRUCell and RNNCell \
are supported for QuantizedRNN for now')
assert mod.bias
def _observe_and_quantize_weight(weight):
if dtype == torch.qint8:
weight_observer = weight_observer_method()
weight_observer(weight)
qweight = _quantize_weight(weight.float(), weight_observer)
return qweight
else:
return weight.float()
qRNNCellBase._packed_weight_ih = pack_weight_bias(_observe_and_quantize_weight(mod.weight_ih), mod.bias_ih, dtype)
qRNNCellBase._packed_weight_hh = pack_weight_bias(_observe_and_quantize_weight(mod.weight_hh), mod.bias_hh, dtype)
return qRNNCellBase
@classmethod
def from_reference(cls, ref_mod):
assert hasattr(ref_mod, "weight_ih_dtype"), "We are assuming weight_ih "
"exists in reference module, may need to relax the assumption to support the use case"
if hasattr(ref_mod, "nonlinearity"):
qmod = cls(
ref_mod.input_size,
ref_mod.hidden_size,
ref_mod.bias,
ref_mod.nonlinearity,
dtype=ref_mod.weight_ih_dtype
)
else:
qmod = cls(
ref_mod.input_size,
ref_mod.hidden_size,
ref_mod.bias,
dtype=ref_mod.weight_ih_dtype
)
weight_bias_dict = {
"weight": {
"weight_ih": ref_mod.get_quantized_weight_ih(),
"weight_hh": ref_mod.get_quantized_weight_hh(),
},
"bias": {
"bias_ih": ref_mod.bias_ih,
"bias_hh": ref_mod.bias_hh,
}
}
qmod.set_weight_bias(weight_bias_dict)
return qmod
def _weight_bias(self):
# Returns a dict of weights and biases
weight_bias_dict: Dict[str, Dict] = {'weight' : {}, 'bias' : {}}
w1, b1 = self._packed_weight_ih.__getstate__()[0]
w2, b2 = self._packed_weight_hh.__getstate__()[0]
# TODO: these can be simplified to one level? e.g. using weight_ih as key
# directly
weight_bias_dict['weight']['weight_ih'] = w1
weight_bias_dict['weight']['weight_hh'] = w2
weight_bias_dict['bias']['bias_ih'] = b1
weight_bias_dict['bias']['bias_hh'] = b2
return weight_bias_dict
def get_weight(self):
return self._weight_bias()['weight']
def get_bias(self):
return self._weight_bias()['bias']
def set_weight_bias(self, weight_bias_dict):
# TODO: these can be simplified to one level? e.g. using weight_ih as key
# directly
self._packed_weight_ih = pack_weight_bias(
weight_bias_dict["weight"]["weight_ih"],
weight_bias_dict["bias"]["bias_ih"],
self.weight_dtype)
self._packed_weight_hh = pack_weight_bias(
weight_bias_dict["weight"]["weight_hh"],
weight_bias_dict["bias"]["bias_hh"],
self.weight_dtype)
def _save_to_state_dict(self, destination, prefix, keep_vars):
super(RNNCellBase, self)._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + '_packed_weight_ih'] = self._packed_weight_ih
destination[prefix + '_packed_weight_hh'] = self._packed_weight_hh
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
self._packed_weight_ih = state_dict.pop(prefix + '_packed_weight_ih')
self._packed_weight_hh = state_dict.pop(prefix + '_packed_weight_hh')
super(RNNCellBase, self)._load_from_state_dict(state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
class RNNCell(RNNCellBase):
r"""An Elman RNN cell with tanh or ReLU non-linearity.
A dynamic quantized RNNCell module with floating point tensor as inputs and outputs.
Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.RNNCell`,
please see https://pytorch.org/docs/stable/nn.html#torch.nn.RNNCell for documentation.
Examples::
>>> rnn = nn.RNNCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
... hx = rnn(input[i], hx)
... output.append(hx)
"""
__constants__ = ['input_size', 'hidden_size', 'bias', 'nonlinearity']
def __init__(self, input_size, hidden_size, bias=True, nonlinearity="tanh", dtype=torch.qint8):
super(RNNCell, self).__init__(input_size, hidden_size, bias, num_chunks=1, dtype=dtype)
self.nonlinearity = nonlinearity
def _get_name(self):
return 'DynamicQuantizedRNNCell'
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
self.check_forward_input(input)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
self.check_forward_hidden(input, hx, '')
if self.nonlinearity == "tanh":
ret = torch.ops.quantized.quantized_rnn_tanh_cell_dynamic(
input, hx,
self._packed_weight_ih, self._packed_weight_hh,
self.bias_ih, self.bias_hh)
elif self.nonlinearity == "relu":
ret = torch.ops.quantized.quantized_rnn_relu_cell_dynamic(
input, hx,
self._packed_weight_ih, self._packed_weight_hh,
self.bias_ih, self.bias_hh)
else:
ret = input # TODO: remove when jit supports exception flow
raise RuntimeError(
"Unknown nonlinearity: {}".format(self.nonlinearity))
return ret
@classmethod
def from_float(cls, mod):
return super(RNNCell, cls).from_float(mod)
class LSTMCell(RNNCellBase):
r"""A long short-term memory (LSTM) cell.
A dynamic quantized LSTMCell module with floating point tensor as inputs and outputs.
Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.LSTMCell`,
please see https://pytorch.org/docs/stable/nn.html#torch.nn.LSTMCell for documentation.
Examples::
>>> rnn = nn.LSTMCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> cx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
... hx, cx = rnn(input[i], (hx, cx))
... output.append(hx)
"""
def __init__(self, *args, **kwargs):
super(LSTMCell, self).__init__(*args, num_chunks=4, **kwargs) # type: ignore[misc]
def _get_name(self):
return 'DynamicQuantizedLSTMCell'
def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
self.check_forward_input(input)
if hx is None:
zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
hx = (zeros, zeros)
self.check_forward_hidden(input, hx[0], '[0]')
self.check_forward_hidden(input, hx[1], '[1]')
return torch.ops.quantized.quantized_lstm_cell_dynamic(
input, hx,
self._packed_weight_ih, self._packed_weight_hh,
self.bias_ih, self.bias_hh)
@classmethod
def from_float(cls, mod):
return super(LSTMCell, cls).from_float(mod)
class GRUCell(RNNCellBase):
r"""A gated recurrent unit (GRU) cell
A dynamic quantized GRUCell module with floating point tensor as inputs and outputs.
Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.GRUCell`,
please see https://pytorch.org/docs/stable/nn.html#torch.nn.GRUCell for documentation.
Examples::
>>> rnn = nn.GRUCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
... hx = rnn(input[i], hx)
... output.append(hx)
"""
def __init__(self, input_size, hidden_size, bias=True, dtype=torch.qint8):
super(GRUCell, self).__init__(input_size, hidden_size, bias, num_chunks=3, dtype=dtype)
def _get_name(self):
return 'DynamicQuantizedGRUCell'
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
self.check_forward_input(input)
if hx is None:
hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
self.check_forward_hidden(input, hx, '')
return torch.ops.quantized.quantized_gru_cell_dynamic(
input, hx,
self._packed_weight_ih, self._packed_weight_hh,
self.bias_ih, self.bias_hh,
)
@classmethod
def from_float(cls, mod):
return super(GRUCell, cls).from_float(mod)
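# Minimal usage sketch (guarded so it only runs as a script, and assuming a
# CPU backend with dynamic quantization support): the dynamic quantized LSTM
# above is normally obtained from a float model via quantize_dynamic rather
# than constructed directly.
if __name__ == "__main__":
    from torch.ao.quantization import quantize_dynamic

    float_model = torch.nn.Sequential(torch.nn.LSTM(10, 20, 2))
    # quantize_dynamic swaps nn.LSTM for the DynamicQuantizedLSTM defined above
    qmodel = quantize_dynamic(float_model, {torch.nn.LSTM}, dtype=torch.qint8)
    x = torch.randn(5, 3, 10)
    out, (hn, cn) = qmodel[0](x)
    print(qmodel[0]._get_name(), out.shape)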
|
pytorch-master
|
torch/nn/quantized/dynamic/modules/rnn.py
|
import torch
import torch.nn.quantized.functional
import torch.nn.intrinsic as nni
from torch import Tensor
class _BatchNorm(torch.nn.modules.batchnorm._BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(num_features, eps, momentum, True, True, **factory_kwargs)
self.register_buffer('scale', torch.tensor(1.0, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(0, **factory_kwargs))
@staticmethod
def from_float(cls, mod):
activation_post_process = mod.activation_post_process
if type(mod) == cls._NNI_BN_RELU_MODULE:
mod = mod[0]
scale, zero_point = activation_post_process.calculate_qparams()
new_mod = cls(mod.num_features, mod.eps)
new_mod.weight = mod.weight
new_mod.bias = mod.bias
new_mod.running_mean = mod.running_mean
new_mod.running_var = mod.running_var
new_mod.scale = scale
new_mod.zero_point = zero_point
return new_mod
@classmethod
def from_reference(cls, bn, output_scale, output_zero_point):
qbn = cls(
bn.num_features,
bn.eps,
bn.momentum,
device=bn.weight.device,
dtype=bn.weight.dtype
)
qbn.weight = bn.weight
qbn.bias = bn.bias
qbn.running_mean = bn.running_mean
qbn.running_var = bn.running_var
qbn.scale = output_scale
qbn.zero_point = output_zero_point
return qbn
class BatchNorm2d(_BatchNorm):
r"""This is the quantized version of :class:`~torch.nn.BatchNorm2d`.
"""
_NNI_BN_RELU_MODULE = nni.BNReLU2d
def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(num_features, eps, momentum, **factory_kwargs)
def _get_name(self):
return 'QuantizedBatchNorm2d'
def _check_input_dim(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
def forward(self, input: Tensor) -> Tensor:
# disabling this since this is not symbolically traceable
# self._check_input_dim(input)
return torch.ops.quantized.batch_norm2d(
input, self.weight, self.bias, self.running_mean,
self.running_var, self.eps, self.scale, self.zero_point)
@classmethod
def from_float(cls, mod):
return _BatchNorm.from_float(cls, mod)
class BatchNorm3d(_BatchNorm):
r"""This is the quantized version of :class:`~torch.nn.BatchNorm3d`.
"""
_NNI_BN_RELU_MODULE = nni.BNReLU3d
def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(num_features, eps, momentum, **factory_kwargs)
def _get_name(self):
return 'QuantizedBatchNorm3d'
def _check_input_dim(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, H, W)`!")
def forward(self, input: Tensor) -> Tensor:
# disabling this since this is not symbolically traceable
# self._check_input_dim(input)
return torch.ops.quantized.batch_norm3d(
input, self.weight, self.bias, self.running_mean,
self.running_var, self.eps, self.scale, self.zero_point)
@classmethod
def from_float(cls, mod):
return _BatchNorm.from_float(cls, mod)
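# Usage sketch (guarded so it only runs as a script): the quantized batch norm
# modules consume and produce quantized tensors, with output qparams taken
# from the `scale` / `zero_point` buffers set by from_float or from_reference
# (here left at their default values of 1.0 / 0).
if __name__ == "__main__":
    qbn = BatchNorm2d(3)
    x = torch.randn(2, 3, 4, 4)
    qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
    qy = qbn(qx)
    print(qy.shape, qy.dtype)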
|
pytorch-master
|
torch/nn/quantized/modules/batchnorm.py
|
from typing import List
import torch
from torch import Tensor
from torch._ops import ops
__all__ = ['FloatFunctional', 'FXFloatFunctional', 'QFunctional']
class FloatFunctional(torch.nn.Module):
r"""State collector class for float operations.
The instance of this class can be used instead of the ``torch.`` prefix for
some operations. See example usage below.
.. note::
This class does not provide a ``forward`` hook. Instead, you must use
one of the underlying functions (e.g. ``add``).
Examples::
>>> f_add = FloatFunctional()
>>> a = torch.tensor(3.0)
>>> b = torch.tensor(4.0)
>>> f_add.add(a, b) # Equivalent to ``torch.add(a, b)``
Valid operation names:
- add
- cat
- mul
- add_relu
- add_scalar
- mul_scalar
"""
def __init__(self):
super(FloatFunctional, self).__init__()
self.activation_post_process = torch.nn.Identity()
def forward(self, x):
raise RuntimeError("FloatFunctional is not intended to use the " +
"'forward'. Please use the underlying operation")
r"""Operation equivalent to ``torch.add(Tensor, Tensor)``"""
def add(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.add(x, y)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.add(Tensor, float)``"""
def add_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.add(x, y)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``"""
def mul(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.mul(x, y)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.mul(Tensor, float)``"""
def mul_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.mul(x, y)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
r"""Operation equivalent to ``torch.cat``"""
def cat(self, x: List[Tensor], dim: int = 0) -> Tensor:
r = torch.cat(x, dim=dim)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``relu(torch.add(x,y))``"""
def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.add(x, y)
r = torch.nn.functional.relu(r)
r = self.activation_post_process(r)
return r
class FXFloatFunctional(torch.nn.Module):
r""" module to replace FloatFunctional module before FX graph mode quantization,
since activation_post_process will be inserted in top level module directly
Valid operation names:
- add
- cat
- mul
- add_relu
- add_scalar
- mul_scalar
"""
def forward(self, x):
raise RuntimeError("FloatFunctional is not intended to use the " +
"'forward'. Please use the underlying operation")
r"""Operation equivalent to ``torch.add(Tensor, Tensor)``"""
def add(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.add(x, y)
return r
r"""Operation equivalent to ``torch.add(Tensor, float)``"""
def add_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.add(x, y)
return r
r"""Operation equivalent to ``torch.mul(Tensor, Tensor)``"""
def mul(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.mul(x, y)
return r
r"""Operation equivalent to ``torch.mul(Tensor, float)``"""
def mul_scalar(self, x: Tensor, y: float) -> Tensor:
r = torch.mul(x, y)
return r
r"""Operation equivalent to ``torch.cat``"""
def cat(self, x: List[Tensor], dim: int = 0) -> Tensor:
r = torch.cat(x, dim=dim)
return r
r"""Operation equivalent to ``relu(torch.add(x,y))``"""
def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
r = torch.add(x, y)
r = torch.nn.functional.relu(r)
return r
class QFunctional(torch.nn.Module):
r"""Wrapper class for quantized operations.
The instance of this class can be used instead of the
``torch.ops.quantized`` prefix. See example usage below.
.. note::
This class does not provide a ``forward`` hook. Instead, you must use
one of the underlying functions (e.g. ``add``).
Examples::
>>> q_add = QFunctional()
>>> # xdoctest: +SKIP
>>> a = torch.quantize_per_tensor(torch.tensor(3.0), 1.0, 0, torch.qint32)
>>> b = torch.quantize_per_tensor(torch.tensor(4.0), 1.0, 0, torch.qint32)
>>> q_add.add(a, b) # Equivalent to ``torch.ops.quantized.add(a, b, 1.0, 0)``
Valid operation names:
- add
- cat
- mul
- add_relu
- add_scalar
- mul_scalar
"""
def __init__(self):
super(QFunctional, self).__init__()
self.scale = 1.0
self.zero_point = 0
self.activation_post_process = torch.nn.Identity()
def _save_to_state_dict(self, destination, prefix, keep_vars):
super(QFunctional, self)._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'scale'] = torch.tensor(self.scale)
destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
self.scale = float(state_dict.pop(prefix + 'scale'))
self.zero_point = int(state_dict.pop(prefix + 'zero_point'))
super(QFunctional, self)._load_from_state_dict(state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
def _get_name(self):
return 'QFunctional'
def extra_repr(self):
return 'scale={}, zero_point={}'.format(
self.scale, self.zero_point
)
def forward(self, x):
raise RuntimeError("Functional is not intended to use the " +
"'forward'. Please use the underlying operation")
r"""Operation equivalent to ``torch.ops.quantized.add``"""
def add(self, x: Tensor, y: Tensor) -> Tensor:
r = ops.quantized.add(x, y, scale=self.scale, zero_point=self.zero_point)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.ops.quantized.add(Tensor, float)``"""
def add_scalar(self, x: Tensor, y: float) -> Tensor:
r = ops.quantized.add_scalar(x, y)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
r"""Operation equivalent to ``torch.ops.quantized.mul(Tensor, Tensor)``"""
def mul(self, x: Tensor, y: Tensor) -> Tensor:
r = ops.quantized.mul(x, y, scale=self.scale, zero_point=self.zero_point)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.ops.quantized.mul(Tensor, float)``"""
def mul_scalar(self, x: Tensor, y: float) -> Tensor:
r = ops.quantized.mul_scalar(x, y)
# Note: this operation is not observed because the observation is not
# needed for the quantized op.
return r
r"""Operation equivalent to ``torch.ops.quantized.cat``"""
def cat(self, x: List[Tensor], dim: int = 0) -> Tensor:
r = ops.quantized.cat(x, scale=self.scale, zero_point=self.zero_point, dim=dim)
r = self.activation_post_process(r)
return r
r"""Operation equivalent to ``torch.ops.quantized.add_relu``"""
def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
r = ops.quantized.add_relu(x, y, scale=self.scale, zero_point=self.zero_point)
r = self.activation_post_process(r)
return r
@classmethod
def from_float(cls, mod):
assert type(mod) == FloatFunctional,\
"QFunctional.from_float expects an instance of FloatFunctional"
scale, zero_point = mod.activation_post_process.calculate_qparams() # type: ignore[operator]
new_mod = QFunctional()
new_mod.scale = float(scale)
new_mod.zero_point = int(zero_point)
return new_mod
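# Usage sketch (guarded so it only runs as a script; the `_Add` module below
# is a hypothetical toy, not part of this file's API): FloatFunctional
# replaces a bare `+` inside a float model so that eager-mode quantization can
# observe the op and later swap the module for QFunctional during convert().
if __name__ == "__main__":
    class _Add(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.ff = FloatFunctional()

        def forward(self, x, y):
            # observed here; dispatched to ops.quantized.add after conversion
            return self.ff.add(x, y)

    m = _Add()
    print(m(torch.tensor(3.0), torch.tensor(4.0)))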
|
pytorch-master
|
torch/nn/quantized/modules/functional_modules.py
|
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.qat as nniqat
from torch.nn.quantized.modules.utils import _quantize_weight, hide_packed_params_repr, WeightedQuantizedModule
from torch.nn.utils.fusion import fuse_linear_bn_weights
from torch.nn.utils.parametrize import type_before_parametrizations
from typing import Optional
__all__ = ['LinearPackedParams', 'Linear']
class LinearPackedParams(torch.nn.Module):
_version = 3
def __init__(self, dtype=torch.qint8):
super().__init__()
self.dtype = dtype
if self.dtype == torch.qint8:
wq = torch._empty_affine_quantized([1, 1], scale=1.0, zero_point=0, dtype=torch.qint8)
elif self.dtype == torch.float16:
wq = torch.zeros([1, 1], dtype=torch.float)
self.set_weight_bias(wq, None)
@torch.jit.export
def set_weight_bias(self, weight: torch.Tensor, bias: Optional[torch.Tensor]) -> None:
if self.dtype == torch.qint8:
self._packed_params = torch.ops.quantized.linear_prepack(weight, bias)
elif self.dtype == torch.float16:
self._packed_params = torch.ops.quantized.linear_prepack_fp16(weight, bias)
else:
raise RuntimeError('Unsupported dtype on dynamic quantized linear!')
@torch.jit.export
def _weight_bias(self):
if self.dtype == torch.qint8:
return torch.ops.quantized.linear_unpack(self._packed_params)
elif self.dtype == torch.float16:
return torch.ops.quantized.linear_unpack_fp16(self._packed_params)
else:
raise RuntimeError('Unsupported dtype on dynamic quantized linear!')
def forward(self, x):
return x
# Version 1
# self
# |--- weight : Tensor
# |--- bias : Tensor
#
# Version 2
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- dtype : torch.dtype
#
# Version 3
# self
# |--- _packed_params : (Tensor, Tensor) representing (weight, bias)
# of LinearPackedParams
# |--- dtype : torch.dtype
def _save_to_state_dict(self, destination, prefix, keep_vars):
super(LinearPackedParams, self)._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'dtype'] = self.dtype
destination[prefix + '_packed_params'] = self._weight_bias()
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get('version', None)
if version is None or version < 2:
self.dtype = torch.qint8
else:
self.dtype = state_dict[prefix + 'dtype']
state_dict.pop(prefix + 'dtype')
if version is None or version < 3:
self.set_weight_bias(state_dict[prefix + 'weight'], state_dict[prefix + 'bias'])
state_dict.pop(prefix + 'weight')
state_dict.pop(prefix + 'bias')
if version == 3:
weight, bias = state_dict[prefix + '_packed_params']
state_dict.pop(prefix + '_packed_params')
self.set_weight_bias(weight, bias)
super(LinearPackedParams, self)._load_from_state_dict(state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
def __repr__(self):
return self._weight_bias().__repr__()
class Linear(WeightedQuantizedModule):
r"""
A quantized linear module with quantized tensor as inputs and outputs.
We adopt the same interface as `torch.nn.Linear`, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Linear for documentation.
Similar to :class:`~torch.nn.Linear`, attributes will be randomly
initialized at module creation time and will be overwritten later
Attributes:
weight (Tensor): the non-learnable quantized weights of the module of
shape :math:`(\text{out\_features}, \text{in\_features})`.
bias (Tensor): the non-learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized to zero.
scale: `scale` parameter of output Quantized Tensor, type: double
zero_point: `zero_point` parameter for output Quantized Tensor, type: long
Examples::
>>> m = nn.quantized.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> # xdoctest: +SKIP
>>> input = torch.quantize_per_tensor(input, 1.0, 0, torch.quint8)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_version = 3
_FLOAT_MODULE = (nn.Linear, nn.modules.linear.NonDynamicallyQuantizableLinear)
def __init__(self, in_features, out_features, bias_=True,
dtype=torch.qint8):
super().__init__()
# We don't muck around with buffers or attributes or anything here
# to keep the module simple. *everything* is simply a Python attribute.
# Serialization logic is explicitly handled in the below serialization and
# deserialization modules
self.in_features = in_features
self.out_features = out_features
bias = None
if bias_:
bias = torch.zeros(out_features, dtype=torch.float)
if dtype == torch.qint8:
qweight = torch._empty_affine_quantized(
[out_features, in_features], scale=1, zero_point=0, dtype=torch.qint8)
elif dtype == torch.float16:
qweight = torch.zeros([out_features, in_features], dtype=torch.float)
else:
raise RuntimeError('Unsupported dtype specified for quantized Linear!')
self._packed_params = LinearPackedParams(dtype)
self._packed_params.set_weight_bias(qweight, bias)
self.scale = 1.0
self.zero_point = 0
def _get_name(self):
return 'QuantizedLinear'
def extra_repr(self):
return 'in_features={}, out_features={}, scale={}, zero_point={}, qscheme={}'.format(
self.in_features, self.out_features, self.scale, self.zero_point, self.weight().qscheme()
)
def __repr__(self):
return hide_packed_params_repr(self, LinearPackedParams)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.ops.quantized.linear(
x, self._packed_params._packed_params, self.scale, self.zero_point)
# ===== Serialization methods =====
# The special consideration here is that we have to unpack the weights into their
# regular QTensor form for serialization. Packed weights should not live
# outside the process in which they were created, rather they should be derived
# from the QTensor weight.
#
# Version 1
# self
# |--- scale : float
# |--- zero_point : int
# |--- weight : Tensor
# |--- bias : Tensor
#
# Version 2
# self
# |--- scale : float
# |--- zero_point : int
# |--- _packed_params : Module
# |--- weight : Tensor
# |--- bias : Tensor
#
# Version 3
# self
# |--- scale : float
# |--- zero_point : int
# |--- _packed_params : Module
# |--- _packed_params : (Tensor, Tensor) representing weight, bias
# of LinearPackedParams C++ struct
#
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'scale'] = torch.tensor(self.scale)
destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)
# ===== Deserialization methods =====
# Counterpart to the serialization methods, we must pack the serialized QTensor
# weight into its packed format for use by the FBGEMM ops.
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
self.scale = float(state_dict[prefix + 'scale'])
state_dict.pop(prefix + 'scale')
self.zero_point = int(state_dict[prefix + 'zero_point'])
state_dict.pop(prefix + 'zero_point')
version = local_metadata.get('version', None)
if version is None or version == 1:
# We moved the parameters into a LinearPackedParameters submodule
weight = state_dict.pop(prefix + 'weight')
bias = state_dict.pop(prefix + 'bias')
state_dict.update({prefix + '_packed_params.weight': weight,
prefix + '_packed_params.bias': bias})
super()._load_from_state_dict(
state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
# Function rather than property to make sure that JIT serialization doesn't
# register this as an attribute
def _weight_bias(self):
return self._packed_params._weight_bias()
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
self._packed_params.set_weight_bias(w, b)
@classmethod
def from_float(cls, mod):
r"""Create a quantized module from an observed float module
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by the user
"""
if hasattr(mod, 'weight_fake_quant'):
if type_before_parametrizations(mod) == nniqat.LinearBn1d:
mod.weight, mod.bias = fuse_linear_bn_weights(
mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
mod.bn.eps, mod.bn.weight, mod.bn.bias)
weight_post_process = mod.weight_fake_quant
activation_post_process = mod.activation_post_process
else:
# This function does not participate in JIT, so it is OK to ignore
# the type mismatch in assignment. Also, mypy has an issue with
# iterables not being implemented, so we are ignoring those too.
if not isinstance(cls._FLOAT_MODULE, Iterable):
cls._FLOAT_MODULE = [cls._FLOAT_MODULE] # type: ignore[assignment]
supported_modules = ', '.join([float_mod.__name__ for float_mod in cls._FLOAT_MODULE]) # type: ignore[attr-defined]
error_msg = 'nnq.{}.from_float only works for {}, but got: {}'.format(cls.__name__, supported_modules, type(mod))
assert type_before_parametrizations(mod) in cls._FLOAT_MODULE, error_msg.format() # type: ignore[attr-defined]
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'
activation_post_process = mod.activation_post_process
if type_before_parametrizations(mod) == nni.LinearReLU:
mod = mod[0]
weight_post_process = mod.qconfig.weight()
weight_post_process(mod.weight)
dtype = weight_post_process.dtype
act_scale, act_zp = activation_post_process.calculate_qparams()
assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
qweight = _quantize_weight(mod.weight.float(), weight_post_process)
qlinear = cls(mod.in_features,
mod.out_features,
dtype=dtype)
qlinear.set_weight_bias(qweight, mod.bias)
qlinear.scale = float(act_scale)
qlinear.zero_point = int(act_zp)
return qlinear
@classmethod
def from_reference(cls, ref_qlinear, output_scale, output_zero_point):
r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module
Args:
ref_qlinear (Module): a reference quantized linear module, either produced by torch.ao.quantization
utilities or provided by the user
output_scale (float): scale for output Tensor
zero_point (int): zero point for output Tensor
"""
qlinear = cls(
ref_qlinear.in_features,
ref_qlinear.out_features)
qweight = ref_qlinear.get_quantized_weight()
qlinear.set_weight_bias(qweight, ref_qlinear.bias)
qlinear.scale = float(output_scale)
qlinear.zero_point = int(output_zero_point)
return qlinear
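# Usage sketch (guarded so it only runs as a script, and assuming a backend
# with qint8 linear kernels such as fbgemm or qnnpack): the weight is packed
# through set_weight_bias and the module runs on quantized activations.
if __name__ == "__main__":
    fc = Linear(4, 2)
    qw = torch.quantize_per_tensor(torch.randn(2, 4), scale=0.05, zero_point=0,
                                   dtype=torch.qint8)
    fc.set_weight_bias(qw, torch.zeros(2))
    qx = torch.quantize_per_tensor(torch.randn(3, 4), scale=0.1, zero_point=128,
                                   dtype=torch.quint8)
    print(fc(qx).shape)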
|
pytorch-master
|
torch/nn/quantized/modules/linear.py
|
import torch
from torch.nn.modules.pooling import MaxPool2d
from .activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid, Softmax, MultiheadAttention, PReLU
from .dropout import Dropout
from .batchnorm import BatchNorm2d, BatchNorm3d
from .normalization import LayerNorm, GroupNorm, InstanceNorm1d, \
InstanceNorm2d, InstanceNorm3d
from .conv import Conv1d, Conv2d, Conv3d
from .conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
from .linear import Linear
from .embedding_ops import Embedding, EmbeddingBag
from .rnn import LSTM
from .functional_modules import FloatFunctional, FXFloatFunctional, QFunctional
class Quantize(torch.nn.Module):
r"""Quantizes an incoming tensor
Args:
`scale`: scale of the output Quantized Tensor
`zero_point`: zero_point of output Quantized Tensor
`dtype`: data type of output Quantized Tensor
`factory_kwargs`: Dictionary of kwargs used for configuring initialization
of internal buffers. Currently, `device` and `dtype` are supported.
Example: `factory_kwargs={'device': 'cuda', 'dtype': torch.float64}`
will initialize internal buffers as type `torch.float64` on the current CUDA device.
Note that `dtype` only applies to floating-point buffers.
Examples::
>>> t = torch.tensor([[1., -1.], [1., -1.]])
>>> scale, zero_point, dtype = 1.0, 2, torch.qint8
>>> qm = Quantize(scale, zero_point, dtype)
>>> # xdoctest: +SKIP
>>> qt = qm(t)
>>> print(qt)
tensor([[ 1., -1.],
[ 1., -1.]], size=(2, 2), dtype=torch.qint8, scale=1.0, zero_point=2)
"""
scale: torch.Tensor
zero_point: torch.Tensor
def __init__(self, scale, zero_point, dtype, factory_kwargs=None):
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
super(Quantize, self).__init__()
self.register_buffer('scale', torch.tensor([scale], **factory_kwargs))
self.register_buffer('zero_point',
torch.tensor([zero_point], dtype=torch.long,
**{k: v for k, v in factory_kwargs.items() if k != 'dtype'}))
self.dtype = dtype
def forward(self, X):
return torch.quantize_per_tensor(X, float(self.scale),
int(self.zero_point), self.dtype)
@staticmethod
def from_float(mod):
assert hasattr(mod, 'activation_post_process')
scale, zero_point = mod.activation_post_process.calculate_qparams()
return Quantize(scale.float().item(), zero_point.long().item(), mod.activation_post_process.dtype)
def extra_repr(self):
return 'scale={}, zero_point={}, dtype={}'.format(self.scale, self.zero_point, self.dtype)
class DeQuantize(torch.nn.Module):
r"""Dequantizes an incoming tensor
Examples::
>>> input = torch.tensor([[1., -1.], [1., -1.]])
>>> scale, zero_point, dtype = 1.0, 2, torch.qint8
>>> qm = Quantize(scale, zero_point, dtype)
>>> # xdoctest: +SKIP
>>> quantized_input = qm(input)
>>> dqm = DeQuantize()
>>> dequantized = dqm(quantized_input)
>>> print(dequantized)
tensor([[ 1., -1.],
[ 1., -1.]], dtype=torch.float32)
"""
def __init__(self):
super(DeQuantize, self).__init__()
def forward(self, Xq):
return Xq.dequantize()
@staticmethod
def from_float(mod):
return DeQuantize()
__all__ = [
'BatchNorm2d',
'BatchNorm3d',
'Conv1d',
'Conv2d',
'Conv3d',
'ConvTranspose1d',
'ConvTranspose2d',
'ConvTranspose3d',
'DeQuantize',
'ELU',
'Embedding',
'EmbeddingBag',
'GroupNorm',
'Hardswish',
'InstanceNorm1d',
'InstanceNorm2d',
'InstanceNorm3d',
'LayerNorm',
'LeakyReLU',
'Linear',
'LSTM',
'MaxPool2d',
'MultiheadAttention',
'Quantize',
'ReLU6',
'Sigmoid',
'Softmax',
'Dropout',
'PReLU',
# Wrapper modules
'FloatFunctional',
'FXFloatFunctional',
'QFunctional',
]
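# Minimal end-to-end sketch (guarded so it only runs as a script): Quantize and
# DeQuantize bracket a quantized module, mirroring how eager-mode convert()
# replaces QuantStub / DeQuantStub in a statically quantized model.
if __name__ == "__main__":
    quant = Quantize(scale=0.1, zero_point=0, dtype=torch.quint8)
    relu6 = ReLU6()
    dequant = DeQuantize()
    x = torch.randn(2, 3)
    y = dequant(relu6(quant(x)))
    print(y)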
|
pytorch-master
|
torch/nn/quantized/modules/__init__.py
|
import torch
import torch.nn.quantized.functional
class ReLU6(torch.nn.ReLU):
r"""Applies the element-wise function:
:math:`\text{ReLU6}(x) = \min(\max(x_0, x), q(6))`, where :math:`x_0` is the
zero_point, and :math:`q(6)` is the quantized representation of number 6.
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: ../scripts/activation_images/ReLU6.png
Examples::
>>> m = nn.quantized.ReLU6()
>>> input = torch.randn(2)
>>> # xdoctest: +SKIP
>>> input = torch.quantize_per_tensor(input, 1.0, 0, dtype=torch.qint32)
>>> output = m(input)
"""
def __init__(self, inplace=False):
super(ReLU6, self).__init__(inplace)
self.inplace = inplace
def forward(self, input):
return torch.ops.quantized.relu6(input, self.inplace)
def _get_name(self):
return 'QuantizedReLU6'
@staticmethod
def from_float(mod):
return ReLU6(mod.inplace)
class Hardswish(torch.nn.Hardswish):
r"""This is the quantized version of :class:`~torch.nn.Hardswish`.
Args:
scale: quantization scale of the output tensor
zero_point: quantization zero point of the output tensor
"""
def __init__(self, scale, zero_point):
super(Hardswish, self).__init__()
self.scale = scale
self.zero_point = zero_point
def forward(self, input):
return torch.nn.quantized.functional.hardswish(
input, scale=self.scale, zero_point=self.zero_point)
def _get_name(self):
return 'QuantizedHardswish'
@staticmethod
def from_float(mod):
scale, zero_point = mod.activation_post_process.calculate_qparams()
return Hardswish(float(scale), int(zero_point))
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(float(scale), int(zero_point))
class ELU(torch.nn.ELU):
r"""This is the quantized equivalent of :class:`~torch.nn.ELU`.
Args:
scale: quantization scale of the output tensor
zero_point: quantization zero point of the output tensor
alpha: the alpha constant
"""
def __init__(self, scale, zero_point, alpha=1.):
super(ELU, self).__init__(alpha)
self.scale = scale
self.zero_point = zero_point
def forward(self, input):
return torch.nn.quantized.functional.elu(
input, self.scale, self.zero_point, self.alpha)
def _get_name(self):
return 'QuantizedELU'
@staticmethod
def from_float(mod):
scale, zero_point = mod.activation_post_process.calculate_qparams()
return ELU(float(scale), int(zero_point), mod.alpha)
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(float(scale), int(zero_point), mod.alpha)
class LeakyReLU(torch.nn.LeakyReLU):
r"""This is the quantized equivalent of :class:`~torch.nn.LeakyReLU`.
Args:
scale: quantization scale of the output tensor
zero_point: quantization zero point of the output tensor
negative_slope: Controls the angle of the negative slope. Default: 1e-2
"""
def __init__(self, scale: float, zero_point: int, negative_slope: float = 1e-2,
inplace: bool = False, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__(negative_slope, inplace)
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.leaky_relu(
input, self.negative_slope, self.inplace, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedLeakyReLU'
@classmethod
def from_float(cls, mod):
scale, zero_point = mod.activation_post_process.calculate_qparams()
return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)
class Sigmoid(torch.nn.Sigmoid):
r"""This is the quantized equivalent of :class:`~torch.nn.Sigmoid`.
Args:
scale: quantization scale of the output tensor
zero_point: quantization zero point of the output tensor
"""
def __init__(self, output_scale: float, output_zero_point: int):
super().__init__()
self.output_scale = output_scale
self.output_zero_point = output_zero_point
def forward(self, input):
return torch.ops.quantized.sigmoid(input, self.output_scale, self.output_zero_point)
@classmethod
def from_float(cls, mod):
output_scale, output_zero_point = mod.activation_post_process.calculate_qparams()
return cls(float(output_scale), int(output_zero_point))
class Softmax(torch.nn.Softmax):
r"""This is the quantized version of :class:`~torch.nn.Softmax`.
Args:
dim: A dimension along which Softmax will be computed (so every slice along dim will sum to 1).
scale: quantization scale of the output tensor
zero_point: quantization zero point of the output tensor
"""
def __init__(self, dim=None, scale=1.0, zero_point=0):
super().__init__()
self.dim = dim
self.scale = scale
self.zero_point = zero_point
def forward(self, input):
dim = self.dim
if dim is None:
stacklevel = 3
# Note: adding the mypy ignore on _get_softmax_dim seems less bad
# than making `_get_softmax_dim` an official API.
dim = torch.nn.functional._get_softmax_dim( # type: ignore[attr-defined]
"softmax", input.dim(), stacklevel)
return torch.ops.quantized.softmax(
input, dim, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedSoftmax'
@staticmethod
def from_float(mod):
scale, zero_point = mod.activation_post_process.calculate_qparams()
return Softmax(mod.dim, float(scale), int(zero_point))
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(mod.dim, float(scale), int(zero_point))
class MultiheadAttention(torch.nn.quantizable.MultiheadAttention):
_FLOAT_MODULE = torch.nn.quantizable.MultiheadAttention
def _get_name(self):
return "QuantizedMultiheadAttention"
@classmethod
def from_float(cls, other):
# The whole flow is float -> observed -> quantized
# This class does observed -> quantized only
raise NotImplementedError("It looks like you are trying to convert a "
"non-observed MHA module. Please, see "
"the examples on quantizable MHAs.")
@classmethod
def from_observed(cls, other):
converted = torch.ao.quantization.convert(other, mapping=None,
inplace=False,
remove_qconfig=True,
convert_custom_config_dict=None)
converted.__class__ = cls
# Remove the parameters for the bias_k and bias_v to quantize them
# TODO: This is a potential source of accuracy drop.
# quantized cat takes the scale and zp of the first
# element, which might lose the precision in the bias_k
# and the bias_v (which are cat'ed with k/v being first).
if converted.bias_k is not None:
bias_k = converted._parameters.pop('bias_k')
sc, zp = torch._choose_qparams_per_tensor(bias_k,
reduce_range=False)
bias_k = torch.quantize_per_tensor(bias_k, sc, zp, torch.quint8)
setattr(converted, 'bias_k', bias_k) # noqa: B010
if converted.bias_v is not None:
bias_v = converted._parameters.pop('bias_v')
sc, zp = torch._choose_qparams_per_tensor(bias_v,
                                          reduce_range=False)
bias_v = torch.quantize_per_tensor(bias_v, sc, zp, torch.quint8)
setattr(converted, 'bias_v', bias_v) # noqa: B010
return converted
class PReLU(torch.nn.Module):
r"""This is the quantized equivalent of :class:`~torch.nn.PReLU`.
Args:
scale: quantization scale of the output tensor
zero_point: quantization zero point of the output tensor
num_parameters: number of parameters: 1, or the number of channels at input. Default: 1
"""
def __init__(self, output_scale: float, output_zero_point: int,
num_parameters: int = 1) -> None:
super().__init__()
self.num_parameters = num_parameters
self.scale = output_scale
self.zero_point = output_zero_point
w = torch.randn(num_parameters, dtype=torch.float)
qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.quint8)
self.set_weight(qw)
def set_weight(self, w: torch.Tensor) -> None:
self.weight = w
def forward(self, input: torch.Tensor) -> torch.Tensor:
return torch.ops.quantized.prelu(input, self.weight, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedPReLU'
@classmethod
def from_float(cls, mod):
scale, zero_point = mod.activation_post_process.calculate_qparams()
qprelu = cls(float(scale), int(zero_point), mod.num_parameters)
float_wt = mod.weight.float()
observer = mod.qconfig.weight()
wt_scale, wt_zp = observer.calculate_qparams()
qweight = torch.quantize_per_tensor(
float_wt, float(wt_scale), int(wt_zp), torch.quint8)
qprelu.set_weight(qweight)
return qprelu
@classmethod
def from_reference(cls, mod, scale, zero_point):
qprelu = cls(float(scale), int(zero_point), mod.num_parameters)
float_wt = mod.weight.float()
observer = mod.qconfig.weight()
wt_scale, wt_zp = observer.calculate_qparams()
qweight = torch.quantize_per_tensor(
float_wt, float(wt_scale), int(wt_zp), torch.quint8)
qprelu.set_weight(qweight)
return qprelu
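# Usage sketch (guarded so it only runs as a script): the quantized activation
# modules take pre-computed output qparams (normally produced by from_float /
# from_reference) and operate directly on quantized tensors.
if __name__ == "__main__":
    qx = torch.quantize_per_tensor(torch.randn(2, 3), scale=0.1, zero_point=128,
                                   dtype=torch.quint8)
    hswish = Hardswish(scale=0.1, zero_point=128)
    lrelu = LeakyReLU(scale=0.1, zero_point=128, negative_slope=0.01)
    print(hswish(qx).shape, lrelu(qx).shape)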
|
pytorch-master
|
torch/nn/quantized/modules/activation.py
|
import abc
import torch
from itertools import repeat
import collections
from torch.nn.modules.module import _addindent
class WeightedQuantizedModule(torch.nn.Module, metaclass=abc.ABCMeta):
"""Wrapper for quantized modules than can be lowered from reference modules."""
@classmethod
@abc.abstractmethod
def from_reference(cls, ref_module, output_scale, output_zero_point):
raise NotImplementedError
def _quantize_weight(float_wt, observer):
wt_scale, wt_zp = observer.calculate_qparams()
if observer.qscheme in [torch.per_tensor_symmetric, torch.per_tensor_affine]:
qweight = torch.quantize_per_tensor(
float_wt,
float(wt_scale), int(wt_zp), torch.qint8)
elif observer.qscheme in [torch.per_channel_symmetric, torch.per_channel_affine]:
wt_axis = observer.ch_axis
qweight = torch.quantize_per_channel(
float_wt,
wt_scale.to(torch.double), wt_zp.to(torch.int64), wt_axis, torch.qint8)
elif observer.qscheme in [torch.per_channel_affine_float_qparams]:
qweight = torch.quantize_per_channel(
float_wt,
wt_scale.to(torch.float), wt_zp.to(torch.float), observer.ch_axis, observer.dtype)
else:
raise ValueError("Unexpected qscheme " + observer.qscheme)
return qweight
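# A minimal usage sketch (editor's addition, not part of the original module).
# `_example_quantize_weight_sketch` is a hypothetical name; MinMaxObserver and
# its kwargs are assumed from torch.ao.quantization. It exercises the
# per-tensor branch of _quantize_weight above.
def _example_quantize_weight_sketch():
    from torch.ao.quantization.observer import MinMaxObserver
    float_wt = torch.randn(8, 4)
    observer = MinMaxObserver(dtype=torch.qint8, qscheme=torch.per_tensor_symmetric)
    observer(float_wt)  # record min/max statistics used by calculate_qparams()
    return _quantize_weight(float_wt, observer)  # per-tensor qint8 weight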
def _ntuple_from_first(n):
"""Converts the argument to a tuple of size n
with the first element repeated."""
def parse(x):
while isinstance(x, collections.abc.Sequence):
if len(x) == n:
break
x = x[0]
return tuple(repeat(x, n))
return parse
def hide_packed_params_repr(self, params):
    # We don't want to show `PackedParams` children, hence the custom
    # `__repr__`. This is the same as nn.Module.__repr__, except for the
    # check that skips child modules of the given `params` type.
extra_lines = []
extra_repr = self.extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split('\n')
child_lines = []
for key, module in self._modules.items():
if isinstance(module, params):
continue
mod_str = repr(module)
mod_str = _addindent(mod_str, 2)
child_lines.append('(' + key + '): ' + mod_str)
lines = extra_lines + child_lines
main_str = self._get_name() + '('
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += '\n ' + '\n '.join(lines) + '\n'
main_str += ')'
return main_str
_pair_from_first = _ntuple_from_first(2)
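# Illustrative examples (editor's addition): _pair_from_first expands a scalar
# or a nested singleton into a pair, e.g.
#
#     >>> _pair_from_first(3)
#     (3, 3)
#     >>> _pair_from_first([3])
#     (3, 3)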
|
pytorch-master
|
torch/nn/quantized/modules/utils.py
|
import torch
import torch.nn.quantized.functional
__all__ = ['Dropout']
class Dropout(torch.nn.Dropout):
r"""This is the quantized equivalent of :class:`~torch.nn.Dropout`.
    It is a placeholder that lets models which applied dropout to fp32 tensors
    work with quantized tensors in both train and eval mode.
Args:
p: probability of an element to be zeroed
inplace: can optionally do the operation in-place. Default: ``False``
"""
def forward(self, input):
return input
def _get_name(self):
return 'QuantizedDropout'
@classmethod
def from_float(cls, mod):
return cls(mod.p, mod.inplace)
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(mod.p, mod.inplace)
|
pytorch-master
|
torch/nn/quantized/modules/dropout.py
|
# coding=utf-8
r"""Quantized convolution modules."""
from typing import Optional, List, TypeVar
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.intrinsic as nni
import torch.nn.intrinsic.qat as nniqat
from torch._ops import ops
from torch.nn.common_types import _size_1_t
from torch.nn.modules.utils import _single, _pair, _triple
from torch.nn.quantized.modules.utils import _quantize_weight, WeightedQuantizedModule
from torch.nn.utils import fuse_conv_bn_weights
__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d']
_SUPPORTED_PADDING = {
'zeros',
'reflect'
}
def _reverse_repeat_padding(padding: List[int]) -> List[int]:
_reversed_padding_repeated_twice: List[int] = []
N = len(padding)
for idx in range(N):
for _ in range(2):
_reversed_padding_repeated_twice.append(padding[N - idx - 1])
return _reversed_padding_repeated_twice
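# Worked example (editor's addition): F.pad expects a flat, last-dimension-first
# layout, so an (H, W) padding of [1, 2] becomes [2, 2, 1, 1]:
#
#     >>> _reverse_repeat_padding([1, 2])
#     [2, 2, 1, 1]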
class _ConvNd(WeightedQuantizedModule):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros', device=None, dtype=None):
        # All subclasses have this signature - See PR #49702
raise NotImplementedError
def _init(self, in_channels, out_channels, kernel_size, stride,
padding, dilation,
transposed, output_padding,
groups, bias,
padding_mode='zeros',
device=None,
dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(_ConvNd, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if padding_mode not in _SUPPORTED_PADDING:
raise ValueError("'padding_mode' {} is not supported by quantized convolution".format(padding_mode))
self.padding_mode = padding_mode
# Initialize as NCHW. set_weight will internally transpose to NHWC.
if self.transposed:
weight_shape = [in_channels, out_channels // self.groups]
else:
weight_shape = [out_channels, in_channels // self.groups]
qweight = torch._empty_affine_quantized(
weight_shape + list(kernel_size),
scale=1, zero_point=0, dtype=torch.qint8,
**{k: v for k, v in factory_kwargs.items() if k != 'dtype'})
bias_float = (
torch.zeros(out_channels, dtype=torch.float,
**{k: v for k, v in factory_kwargs.items() if k != 'dtype'}) if bias else None)
self.set_weight_bias(qweight, bias_float)
self.scale = 1.0
self.zero_point = 0
def set_weight_bias(self, qweight, bias_float):
raise NotImplementedError
def bias(self):
raise NotImplementedError
def _weight_bias(self):
raise NotImplementedError
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}, scale={scale}, zero_point={zero_point}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias() is None:
s += ', bias=False'
return s.format(**self.__dict__)
# ===== Serialization methods =====
# The special consideration here is that we have to unpack the weights into
# their regular QTensor form for serialization. Packed weights should not
# live outside the process in which they were created, rather they should be
# derived from the QTensor weight.
# self
# |--- weight : Tensor
# |--- bias : Tensor
#
# TODO: maybe change to this when https://github.com/pytorch/pytorch/pull/32958 is landed
# self
# |--- _packed_params : Conv2dPackedParamsBase or Conv3dPackedParamsBase
def _save_to_state_dict(self, destination, prefix, keep_vars):
super(_ConvNd, self)._save_to_state_dict(destination, prefix, keep_vars)
(w, b) = self._weight_bias()
destination[prefix + 'weight'] = w
destination[prefix + 'bias'] = b
destination[prefix + 'scale'] = torch.tensor(self.scale)
destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)
@torch.jit.export
def __getstate__(self):
(w, b) = self._weight_bias()
return (
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
self.dilation,
self.transposed,
self.output_padding,
self.groups,
self.padding_mode,
w,
b,
self.scale,
self.zero_point,
self.training
)
# ===== Deserialization methods =====
# Counterpart to the serialization methods, we must pack the serialized
# QTensor weight into its packed format for use by the FBGEMM ops.
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
self.set_weight_bias(
state_dict[prefix + 'weight'], state_dict[prefix + 'bias'])
state_dict.pop(prefix + 'weight')
state_dict.pop(prefix + 'bias')
self.scale = float(state_dict[prefix + 'scale'])
state_dict.pop(prefix + 'scale')
self.zero_point = int(state_dict[prefix + 'zero_point'])
state_dict.pop(prefix + 'zero_point')
super(_ConvNd, self)._load_from_state_dict(
state_dict, prefix, local_metadata, False, missing_keys,
unexpected_keys, error_msgs)
@torch.jit.export
def __setstate__(self, state):
self.in_channels = state[0]
self.out_channels = state[1]
self.kernel_size = state[2]
self.stride = state[3]
self.padding = state[4]
self.dilation = state[5]
self.transposed = state[6]
self.output_padding = state[7]
self.groups = state[8]
self.padding_mode = state[9]
self.set_weight_bias(state[10], state[11])
self.scale = state[12]
self.zero_point = state[13]
self.training = state[14]
def __deepcopy__(self, memo):
new_instance = type(self).__new__(type(self))
torch.nn.Module.__init__(new_instance)
state = self.__getstate__()
new_instance.__setstate__(state)
return new_instance
def __copy__(self):
return self.__deepcopy__({})
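    # Editor's illustrative note (not part of the original class): given the
    # (de)serialization methods above, a round trip looks like
    #
    #     sd = qconv.state_dict()          # holds the unpacked QTensor weight/bias
    #     other_qconv.load_state_dict(sd)  # re-packs them via set_weight_bias
    #
    # so packed params never leave the process in which they were created.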
@classmethod
def get_qconv(cls, mod, activation_post_process, weight_post_process=None):
r"""Creates a qconv object and returns it.
"""
if weight_post_process is None:
weight_post_process = mod.qconfig.weight()
weight_post_process(mod.weight)
assert weight_post_process.dtype == torch.qint8, \
'Weight observer must have a dtype of qint8'
qweight = _quantize_weight(mod.weight.float(), weight_post_process)
# the __init__ call used is the one from derived classes and not the one from _ConvNd
qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
mod.stride, mod.padding, mod.dilation, mod.groups,
mod.bias is not None, mod.padding_mode)
qconv.set_weight_bias(qweight, mod.bias)
if activation_post_process is None or activation_post_process.dtype == torch.float:
return qconv # dynamic quantization doesn't need scale/zero_point
else:
act_scale, act_zp = activation_post_process.calculate_qparams()
qconv.scale = float(act_scale)
qconv.zero_point = int(act_zp)
return qconv
@staticmethod
def from_float(cls, mod):
if hasattr(mod, "weight_fake_quant"):
# assert type(mod) == cls.__QAT_MODULE, " nnq." + cls.__name__ + \
# ".from_float only works for " + cls.__QAT_MODULE.__name__
if type(mod) == cls._NNIQAT_CONV_BN_MODULE:
mod.weight, mod.bias = fuse_conv_bn_weights(
mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var,
mod.bn.eps, mod.bn.weight, mod.bn.bias)
assert hasattr(mod, "activation_post_process"), \
"Input QAT module must have observer attached"
weight_post_process = mod.weight_fake_quant
activation_post_process = mod.activation_post_process
else:
assert type(mod) == cls._FLOAT_MODULE, \
" nnq." + cls.__name__ + ".from_float only works for " + \
cls._FLOAT_MODULE.__name__ + " but got:" + str(type(mod))
assert hasattr(mod, "qconfig"), \
"Input float module must have qconfig defined."
activation_post_process = None if not hasattr(
mod, "activation_post_process") else mod.activation_post_process
if type(mod) == cls._NNI_CONV_RELU_MODULE:
mod = mod[0]
weight_post_process = mod.qconfig.weight()
return cls.get_qconv(mod, activation_post_process, weight_post_process)
@classmethod
def from_reference(cls, ref_qconv, output_scale, output_zero_point):
r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module
Args:
ref_module (Module): a reference quantized module, either produced by torch.ao.quantization
utilities or provided by the user
output_scale (float): scale for output Tensor
output_zero_point (int): zero point for output Tensor
"""
qconv = cls(
ref_qconv.in_channels,
ref_qconv.out_channels,
ref_qconv.kernel_size, # type: ignore[arg-type]
ref_qconv.stride, # type: ignore[arg-type]
ref_qconv.padding, # type: ignore[arg-type]
ref_qconv.dilation, # type: ignore[arg-type]
ref_qconv.groups,
ref_qconv.bias is not None, # type: ignore[arg-type]
ref_qconv.padding_mode,
device=ref_qconv.weight.device,
dtype=ref_qconv.weight.dtype)
qweight = ref_qconv.get_quantized_weight()
qconv.set_weight_bias(qweight, ref_qconv.bias)
qconv.scale = float(output_scale)
qconv.zero_point = int(output_zero_point)
return qconv
class Conv1d(_ConvNd):
r"""Applies a 1D convolution over a quantized input signal composed of
several quantized input planes.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.Conv1d`.
.. note::
Only `zeros` is supported for the :attr:`padding_mode` argument.
.. note::
Only `torch.quint8` is supported for the input data type.
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.Conv1d` for other attributes.
Examples::
>>> m = nn.quantized.Conv1d(16, 33, 3, stride=2)
>>> input = torch.randn(20, 16, 100)
>>> # quantize input to quint8
>>> # xdoctest: +SKIP
>>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0,
dtype=torch.quint8)
>>> output = m(q_input)
"""
_FLOAT_MODULE = nn.Conv1d
_NNIQAT_CONV_BN_MODULE = nniqat.ConvBn1d
_NNI_CONV_RELU_MODULE = nni.ConvReLU1d
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: _size_1_t,
stride: _size_1_t = 1,
padding: _size_1_t = 0,
dilation: _size_1_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
device=None,
dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = padding if isinstance(padding, str) else _single(padding)
dilation = _single(dilation)
        # Subclasses of _ConvNd need to call _init rather than __init__. See
        # discussion on PR #49702
super(Conv1d, self)._init(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _single(0), groups, bias, padding_mode, **factory_kwargs)
def _get_name(self):
return 'QuantizedConv1d'
def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
if self.padding_mode == 'zeros':
self._packed_params = torch.ops.quantized.conv1d_prepack(
w, b, self.stride, self.padding, self.dilation, self.groups)
else:
self._packed_params = torch.ops.quantized.conv1d_prepack(
w, b, self.stride, _pair(0), self.dilation,
self.groups)
def _weight_bias(self):
w, b = torch.ops.quantized.conv1d_unpack(self._packed_params)
return w, b
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 3:
raise ValueError("Input shape must be `(N, C, L)`!")
if self.padding_mode != 'zeros':
# Padding in Conv1d is stored as (p, p), need to get (p,)
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
input = F.pad(input, _reversed_padding_repeated_twice,
mode=self.padding_mode)
return ops.quantized.conv1d(input, self._packed_params, self.scale, self.zero_point)
@classmethod
def from_float(cls, mod):
r"""Creates a quantized module from a float module or qparams_dict.
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by the user
"""
return _ConvNd.from_float(cls, mod)
class Conv2d(_ConvNd):
r"""Applies a 2D convolution over a quantized input signal composed of
several quantized input planes.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.Conv2d`.
.. note::
Only `zeros` is supported for the :attr:`padding_mode` argument.
.. note::
Only `torch.quint8` is supported for the input data type.
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.Conv2d` for other attributes.
Examples::
>>> # With square kernels and equal stride
>>> m = nn.quantized.Conv2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.quantized.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> # non-square kernels and unequal stride and with padding and dilation
>>> m = nn.quantized.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
>>> input = torch.randn(20, 16, 50, 100)
>>> # quantize input to quint8
>>> # xdoctest: +SKIP
>>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
>>> output = m(q_input)
"""
_FLOAT_MODULE = nn.Conv2d
_NNIQAT_CONV_BN_MODULE = nniqat.ConvBn2d
_NNI_CONV_RELU_MODULE = nni.ConvReLU2d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros', device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
# Subclasses of _ConvNd need to call _init rather than __init__. See
# discussion on PR #49702
super(Conv2d, self)._init(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode, **factory_kwargs)
def _get_name(self):
return 'QuantizedConv2d'
def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
if self.padding_mode == 'zeros':
self._packed_params = torch.ops.quantized.conv2d_prepack(
w, b, self.stride, self.padding, self.dilation, self.groups)
else:
self._packed_params = torch.ops.quantized.conv2d_prepack(
w, b, self.stride, _pair(0), self.dilation, self.groups)
def _weight_bias(self):
return self._packed_params.unpack()
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
if self.padding_mode != 'zeros':
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
input = F.pad(input, _reversed_padding_repeated_twice,
mode=self.padding_mode)
return ops.quantized.conv2d(
input, self._packed_params, self.scale, self.zero_point)
@classmethod
def from_float(cls, mod):
r"""Creates a quantized module from a float module or qparams_dict.
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by the user
"""
return _ConvNd.from_float(cls, mod)
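# Eager-mode conversion sketch (editor's addition): the helper name below is
# hypothetical, `default_qconfig` is assumed from torch.ao.quantization, and a
# quantized backend (fbgemm/qnnpack) must be available for weight prepacking.
def _example_conv2d_from_float_sketch():
    float_conv = nn.Conv2d(3, 6, 3)
    float_conv.qconfig = torch.ao.quantization.default_qconfig
    # attach and run an activation observer so calculate_qparams() has data
    float_conv.activation_post_process = float_conv.qconfig.activation()
    float_conv.activation_post_process(float_conv(torch.randn(1, 3, 8, 8)))
    return Conv2d.from_float(float_conv)  # QuantizedConv2d with packed weights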
class Conv3d(_ConvNd):
r"""Applies a 3D convolution over a quantized input signal composed of
several quantized input planes.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.Conv3d`.
.. note::
Only `zeros` is supported for the :attr:`padding_mode` argument.
.. note::
Only `torch.quint8` is supported for the input data type.
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.Conv3d` for other attributes.
Examples::
>>> # With square kernels and equal stride
>>> m = nn.quantized.Conv3d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nn.quantized.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2))
>>> # non-square kernels and unequal stride and with padding and dilation
>>> m = nn.quantized.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), dilation=(1, 2, 2))
>>> input = torch.randn(20, 16, 56, 56, 56)
>>> # quantize input to quint8
>>> # xdoctest: +SKIP
>>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
>>> output = m(q_input)
"""
_FLOAT_MODULE = nn.Conv3d
_NNIQAT_CONV_BN_MODULE = nniqat.ConvBn3d
_NNI_CONV_RELU_MODULE = nni.ConvReLU3d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros', device=None, dtype=None):
assert padding_mode != 'reflect', "Conv3d does not support reflection padding"
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
# Subclasses of _ConvNd need to call _init rather than __init__. See
# discussion on PR #49702
super(Conv3d, self)._init(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _triple(0), groups, bias, padding_mode, **factory_kwargs)
def _get_name(self):
return 'QuantizedConv3d'
def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
if self.padding_mode == 'zeros':
self._packed_params = torch.ops.quantized.conv3d_prepack(
w, b, self.stride, self.padding, self.dilation, self.groups)
else:
self._packed_params = torch.ops.quantized.conv3d_prepack(
w, b, self.stride, _triple(0), self.dilation, self.groups)
def _weight_bias(self):
return self._packed_params.unpack()
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, D, H, W)`!")
if self.padding_mode != 'zeros':
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
input = F.pad(input, _reversed_padding_repeated_twice,
mode=self.padding_mode)
return ops.quantized.conv3d(
input, self._packed_params, self.scale, self.zero_point)
@classmethod
def from_float(cls, mod):
r"""Creates a quantized module from a float module or qparams_dict.
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by the user
"""
return _ConvNd.from_float(cls, mod)
# === Transposed Convolutions ===
MOD = TypeVar('MOD', bound=nn.modules.conv._ConvNd)
class _ConvTransposeNd(_ConvNd):
_FLOAT_MODULE = MOD
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding,
groups, bias, padding_mode, device=None, dtype=None):
if padding_mode != 'zeros':
raise ValueError('Only "zeros" padding mode is supported for {}'.format(self.__class__.__name__))
factory_kwargs = {'device': device, 'dtype': dtype}
# Subclasses of _ConvNd need to call _init rather than __init__. See
# discussion on PR #49702
super(_ConvTransposeNd, self)._init(
in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding,
groups, bias, padding_mode, **factory_kwargs)
def _input_padding(self, kernel_size: List[int], dilation: List[int], padding: List[int]) -> List[int]:
res = torch.jit.annotate(List[int], [])
for kdx in range(len(kernel_size)):
pad = (dilation[kdx] * (kernel_size[kdx] - 1) - padding[kdx])
res.append(pad)
return res
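    # Worked example (editor's addition): with kernel_size=[3], dilation=[2] and
    # padding=[1], _input_padding returns [2 * (3 - 1) - 1] == [3], i.e. the
    # implicit per-dimension input padding of the underlying transposed convolution.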
@classmethod
def from_float(cls, mod):
r"""Creates a quantized module from a float module or qparams_dict.
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by the user
"""
# derived classes override cls._FLOAT_MODULE attribute
msg = ' nnq.' + cls.__name__ + '.from_float only works for ' + \
cls._FLOAT_MODULE.__name__ # type: ignore[attr-defined]
assert type(mod) == cls._FLOAT_MODULE, msg
assert hasattr(mod, 'qconfig'), \
'Input float module must have qconfig defined.'
weight_post_process = mod.qconfig.weight()
weight_post_process(mod.weight)
assert weight_post_process.dtype == torch.qint8, \
'Weight observer must have a dtype of qint8'
qweight = _quantize_weight(mod.weight.float(), weight_post_process)
# the __init__ call used is the one from derived classes and not the one from _ConvTransposeNd
qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size, # type: ignore[call-arg]
mod.stride, mod.padding, mod.output_padding, mod.groups,
mod.bias is not None, mod.dilation, mod.padding_mode)
qconv.set_weight_bias(qweight, mod.bias)
if not hasattr(mod, "activation_post_process") or mod.activation_post_process.dtype == torch.float:
return qconv # dynamic quantization doesn't need scale/zero_point
else:
act_scale, act_zp = mod.activation_post_process.calculate_qparams()
qconv.scale = float(act_scale)
qconv.zero_point = int(act_zp)
return qconv
@staticmethod
def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module
Args:
ref_module (Module): a reference quantized module, either produced by torch.ao.quantization
utilities or provided by the user
output_scale (float): scale for output Tensor
output_zero_point (int): zero point for output Tensor
"""
qconv = cls(
ref_qconvt.in_channels,
ref_qconvt.out_channels,
ref_qconvt.kernel_size, # type: ignore[arg-type]
ref_qconvt.stride, # type: ignore[arg-type]
ref_qconvt.padding, # type: ignore[arg-type]
ref_qconvt.output_padding, # type: ignore[arg-type]
ref_qconvt.groups,
ref_qconvt.bias is not None, # type: ignore[arg-type]
ref_qconvt.dilation, # type: ignore[arg-type]
ref_qconvt.padding_mode,
device=ref_qconvt.weight.device,
dtype=ref_qconvt.weight.dtype)
qweight = ref_qconvt.get_quantized_weight()
qconv.set_weight_bias(qweight, ref_qconvt.bias)
qconv.scale = float(output_scale)
qconv.zero_point = int(output_zero_point)
return qconv
class ConvTranspose1d(_ConvTransposeNd):
r"""Applies a 1D transposed convolution operator over an input image
composed of several input planes.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.ConvTranspose1d`.
    .. note:: Currently only the QNNPACK engine is implemented.
        Please set `torch.backends.quantized.engine = 'qnnpack'`.
    For special notes, please see :class:`~torch.nn.quantized.Conv1d`
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.ConvTranspose2d` for other attributes.
Examples::
>>> torch.backends.quantized.engine = 'qnnpack'
>>> # With square kernels and equal stride
>>> # xdoctest: +SKIP
>>> m = nnq.ConvTranspose1d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nnq.ConvTranspose1d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> input = torch.randn(20, 16, 50)
>>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
>>> output = m(q_input)
>>> # exact output size can be also specified as an argument
>>> input = torch.randn(1, 16, 12)
>>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
>>> downsample = nnq.Conv1d(16, 16, 3, stride=2, padding=1)
>>> upsample = nnq.ConvTranspose1d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(q_input)
>>> h.size()
torch.Size([1, 16, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12])
"""
_FLOAT_MODULE = nn.ConvTranspose1d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, bias=True,
dilation=1, padding_mode='zeros', device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
output_padding = _single(output_padding)
super(ConvTranspose1d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode, **factory_kwargs)
def _get_name(self):
        return 'QuantizedConvTranspose1d'
def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
self._packed_params = torch.ops.quantized.conv_transpose1d_prepack(
w, b, self.stride, self.padding, self.output_padding, self.dilation,
self.groups)
def _weight_bias(self):
w, b = torch.ops.quantized.conv_transpose1d_unpack(self._packed_params)
return w, b
def weight(self):
(w, _) = self._weight_bias()
return w
def bias(self):
(_, b) = self._weight_bias()
return b
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 3:
raise ValueError("Input shape must be `(N, C, L)`!")
return torch.ops.quantized.conv_transpose1d(
input, self._packed_params, self.scale, self.zero_point)
@classmethod
def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)
class ConvTranspose2d(_ConvTransposeNd):
r"""Applies a 2D transposed convolution operator over an input image
composed of several input planes.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.ConvTranspose2d`.
    For special notes, please see :class:`~torch.nn.quantized.Conv2d`
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.ConvTranspose2d` for other attributes.
Examples::
>>> # QNNPACK or FBGEMM as backend
>>> torch.backends.quantized.engine = 'qnnpack'
>>> # With square kernels and equal stride
>>> # xdoctest: +SKIP
>>> m = nnq.ConvTranspose2d(16, 33, 3, stride=2)
>>> # non-square kernels and unequal stride and with padding
>>> m = nnq.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
>>> input = torch.randn(20, 16, 50, 100)
>>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
>>> output = m(q_input)
>>> # exact output size can be also specified as an argument
>>> input = torch.randn(1, 16, 12, 12)
>>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
>>> downsample = nnq.Conv2d(16, 16, 3, stride=2, padding=1)
>>> upsample = nnq.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(q_input)
>>> h.size()
torch.Size([1, 16, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12])
"""
_FLOAT_MODULE = nn.ConvTranspose2d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, bias=True,
dilation=1, padding_mode='zeros', device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
output_padding = _pair(output_padding)
super(ConvTranspose2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode, **factory_kwargs)
def _get_name(self):
        return 'QuantizedConvTranspose2d'
def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
self._packed_params = torch.ops.quantized.conv_transpose2d_prepack(
w, b, self.stride, self.padding, self.output_padding, self.dilation,
self.groups)
def _weight_bias(self):
w, b = torch.ops.quantized.conv2d_unpack(self._packed_params)
return w, b
def weight(self):
(w, _) = self._weight_bias()
return w
def bias(self):
(_, b) = self._weight_bias()
return b
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
return ops.quantized.conv_transpose2d(
input, self._packed_params, self.scale, self.zero_point)
@classmethod
def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)
class ConvTranspose3d(_ConvTransposeNd):
r"""Applies a 3D transposed convolution operator over an input image
composed of several input planes.
For details on input arguments, parameters, and implementation see
:class:`~torch.nn.ConvTranspose3d`.
    .. note:: Currently only the FBGEMM engine is implemented.
        Please set `torch.backends.quantized.engine = 'fbgemm'`.
    For special notes, please see :class:`~torch.nn.quantized.Conv3d`
Attributes:
weight (Tensor): packed tensor derived from the learnable weight
parameter.
scale (Tensor): scalar for the output scale
zero_point (Tensor): scalar for the output zero point
See :class:`~torch.nn.ConvTranspose3d` for other attributes.
Examples::
>>> torch.backends.quantized.engine = 'fbgemm'
>>> # With cubic kernels and equal stride
>>> # xdoctest: +SKIP
>>> m = nnq.ConvTranspose3d(16, 33, 3, stride=2)
>>> # non-cubic kernels and unequal stride and with padding
>>> m = nnq.ConvTranspose3d(16, 33, (3, 3, 5), stride=(2, 1, 1), padding=(4, 2, 2))
>>> input = torch.randn(20, 16, 50, 100, 100)
>>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
>>> output = m(q_input)
>>> # exact output size can be also specified as an argument
>>> input = torch.randn(1, 16, 12, 12, 12)
>>> q_input = torch.quantize_per_tensor(input, scale=1.0, zero_point=0, dtype=torch.quint8)
>>> downsample = nnq.Conv3d(16, 16, 3, stride=2, padding=1)
>>> upsample = nnq.ConvTranspose3d(16, 16, 3, stride=2, padding=1)
>>> h = downsample(q_input)
>>> h.size()
torch.Size([1, 16, 6, 6, 6])
>>> output = upsample(h, output_size=input.size())
>>> output.size()
torch.Size([1, 16, 12, 12, 12])
"""
_FLOAT_MODULE = nn.ConvTranspose3d
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, bias=True,
dilation=1, padding_mode='zeros', device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
kernel_size = _triple(kernel_size)
stride = _triple(stride)
padding = _triple(padding)
dilation = _triple(dilation)
output_padding = _triple(output_padding)
super(ConvTranspose3d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
True, output_padding, groups, bias, padding_mode, **factory_kwargs)
def _get_name(self):
        return 'QuantizedConvTranspose3d'
def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor]) -> None:
self._packed_params = torch.ops.quantized.conv_transpose3d_prepack(
w, b, self.stride, self.padding, self.output_padding, self.dilation,
self.groups)
def _weight_bias(self):
w, b = torch.ops.quantized.conv3d_unpack(self._packed_params)
return w, b
def weight(self):
(w, _) = self._weight_bias()
return w
def bias(self):
(_, b) = self._weight_bias()
return b
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 5:
raise ValueError("Input shape must be `(N, C, T, H, W)`!")
return ops.quantized.conv_transpose3d(
input, self._packed_params, self.scale, self.zero_point)
@classmethod
def from_reference(cls, ref_qconvt, output_scale, output_zero_point):
return _ConvTransposeNd.from_reference(cls, ref_qconvt, output_scale, output_zero_point)
|
pytorch-master
|
torch/nn/quantized/modules/conv.py
|
import torch
import torch.nn.quantized.functional
__all__ = ['LayerNorm', 'GroupNorm', 'InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d']
class LayerNorm(torch.nn.LayerNorm):
r"""This is the quantized version of :class:`~torch.nn.LayerNorm`.
Additional args:
* **scale** - quantization scale of the output, type: double.
* **zero_point** - quantization zero point of the output, type: long.
"""
def __init__(self, normalized_shape, weight, bias, scale, zero_point, eps=1e-5,
elementwise_affine=True, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(LayerNorm, self).__init__(
normalized_shape, eps=eps, elementwise_affine=elementwise_affine,
**factory_kwargs)
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.layer_norm(
input, self.normalized_shape, weight=self.weight, bias=self.bias,
eps=self.eps, output_scale=self.scale, output_zero_point=self.zero_point)
def _get_name(self):
return 'QuantizedLayerNorm'
@classmethod
def from_float(cls, mod):
scale, zero_point = mod.activation_post_process.calculate_qparams()
new_mod = cls(
mod.normalized_shape, mod.weight, mod.bias, float(scale),
int(zero_point), mod.eps, mod.elementwise_affine)
return new_mod
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(
mod.normalized_shape, mod.weight, mod.bias, float(scale),
int(zero_point), mod.eps, mod.elementwise_affine)
class GroupNorm(torch.nn.GroupNorm):
r"""This is the quantized version of :class:`~torch.nn.GroupNorm`.
Additional args:
* **scale** - quantization scale of the output, type: double.
* **zero_point** - quantization zero point of the output, type: long.
"""
__constants__ = ['num_groups', 'num_channels', 'eps', 'affine']
def __init__(self, num_groups, num_channels, weight, bias, scale, zero_point, eps=1e-5,
affine=True, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(GroupNorm, self).__init__(num_groups, num_channels, eps, affine,
**factory_kwargs)
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.group_norm(
input, self.num_groups, self.weight, self.bias, self.eps, self.scale,
self.zero_point)
def _get_name(self):
return 'QuantizedGroupNorm'
@classmethod
def from_float(cls, mod):
scale, zero_point = mod.activation_post_process.calculate_qparams()
new_mod = cls(
mod.num_groups, mod.num_channels, mod.weight, mod.bias, float(scale), int(zero_point),
mod.eps, mod.affine)
return new_mod
class InstanceNorm1d(torch.nn.InstanceNorm1d):
r"""This is the quantized version of :class:`~torch.nn.InstanceNorm1d`.
Additional args:
* **scale** - quantization scale of the output, type: double.
* **zero_point** - quantization zero point of the output, type: long.
"""
def __init__(self, num_features, weight, bias, scale, zero_point,
eps=1e-5, momentum=0.1, affine=False,
track_running_stats=False, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(InstanceNorm1d, self).__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.instance_norm(
input, self.weight, self.bias, self.eps, self.scale,
self.zero_point)
def _get_name(self):
return 'QuantizedInstanceNorm1d'
@classmethod
def from_float(cls, mod):
scale, zero_point = mod.activation_post_process.calculate_qparams()
new_mod = cls(
mod.num_features, mod.weight, mod.bias, float(scale), int(zero_point),
mod.eps, mod.affine)
return new_mod
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(
mod.num_features, mod.weight, mod.bias, float(scale), int(zero_point),
mod.eps, mod.affine)
class InstanceNorm2d(torch.nn.InstanceNorm2d):
r"""This is the quantized version of :class:`~torch.nn.InstanceNorm2d`.
Additional args:
* **scale** - quantization scale of the output, type: double.
* **zero_point** - quantization zero point of the output, type: long.
"""
def __init__(self, num_features, weight, bias, scale, zero_point,
eps=1e-5, momentum=0.1, affine=False,
track_running_stats=False, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(InstanceNorm2d, self).__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.instance_norm(
input, self.weight, self.bias, self.eps, self.scale,
self.zero_point)
def _get_name(self):
return 'QuantizedInstanceNorm2d'
@classmethod
def from_float(cls, mod):
scale, zero_point = mod.activation_post_process.calculate_qparams()
new_mod = cls(
mod.num_features, mod.weight, mod.bias, float(scale), int(zero_point),
mod.eps, mod.affine)
return new_mod
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(
mod.num_features, mod.weight, mod.bias, float(scale), int(zero_point),
mod.eps, mod.affine)
class InstanceNorm3d(torch.nn.InstanceNorm3d):
r"""This is the quantized version of :class:`~torch.nn.InstanceNorm3d`.
Additional args:
* **scale** - quantization scale of the output, type: double.
* **zero_point** - quantization zero point of the output, type: long.
"""
def __init__(self, num_features, weight, bias, scale, zero_point,
eps=1e-5, momentum=0.1, affine=False,
track_running_stats=False, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super(InstanceNorm3d, self).__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs)
self.weight = weight
self.bias = bias
self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.instance_norm(
input, self.weight, self.bias, self.eps, self.scale,
self.zero_point)
def _get_name(self):
return 'QuantizedInstanceNorm3d'
@classmethod
def from_float(cls, mod):
scale, zero_point = mod.activation_post_process.calculate_qparams()
new_mod = cls(
mod.num_features, mod.weight, mod.bias, float(scale), int(zero_point),
mod.eps, mod.affine)
return new_mod
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(
mod.num_features, mod.weight, mod.bias, float(scale), int(zero_point),
mod.eps, mod.affine)
|
pytorch-master
|
torch/nn/quantized/modules/normalization.py
|
import torch
class LSTM(torch.nn.quantizable.LSTM):
r"""A quantized long short-term memory (LSTM).
For the description and the argument types, please, refer to :class:`~torch.nn.LSTM`
Attributes:
layers : instances of the `_LSTMLayer`
.. note::
To access the weights and biases, you need to access them per layer.
See examples in :class:`~torch.nn.quantizable.LSTM`
Examples::
>>> # xdoctest: +SKIP
>>> custom_module_config = {
... 'float_to_observed_custom_module_class': {
... nn.LSTM: nn.quantizable.LSTM,
... },
... 'observed_to_quantized_custom_module_class': {
... nn.quantizable.LSTM: nn.quantized.LSTM,
... }
... }
>>> tq.prepare(model, prepare_custom_module_class=custom_module_config)
>>> tq.convert(model, convert_custom_module_class=custom_module_config)
"""
_FLOAT_MODULE = torch.nn.quantizable.LSTM
def _get_name(self):
return 'QuantizedLSTM'
@classmethod
def from_float(cls, *args, **kwargs):
# The whole flow is float -> observed -> quantized
# This class does observed -> quantized only
raise NotImplementedError("It looks like you are trying to convert a "
"non-observed LSTM module. Please, see "
"the examples on quantizable LSTMs.")
@classmethod
def from_observed(cls, other):
assert type(other) == cls._FLOAT_MODULE
converted = torch.ao.quantization.convert(other, inplace=False,
remove_qconfig=True)
converted.__class__ = cls
return converted
|
pytorch-master
|
torch/nn/quantized/modules/rnn.py
|
import torch
import torch.nn as nn
from torch import Tensor # noqa: F401
from torch._jit_internal import Optional, List # noqa: F401
from torch.nn.quantized.modules.utils import hide_packed_params_repr
from torch.nn.quantized.modules.utils import _quantize_weight
__all__ = ['EmbeddingPackedParams', 'Embedding', 'EmbeddingBag']
class EmbeddingPackedParams(torch.nn.Module):
_version = 1
def __init__(self, num_embeddings, embedding_dim, dtype=torch.quint8):
super(EmbeddingPackedParams, self).__init__()
self.dtype = dtype
if self.dtype in [torch.quint8, torch.quint4x2]:
scales = torch.ones(num_embeddings, dtype=torch.float)
zero_points = torch.zeros(num_embeddings, dtype=torch.float)
wq = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim], scales=scales,
zero_points=zero_points,
axis=0, dtype=self.dtype)
self.set_weight(wq)
else:
raise NotImplementedError(f'Unsupported dtype on quantized embedding! Supports quint8 and quint4x2. Got dtype: {dtype}')
@torch.jit.export
def set_weight(self, weight: torch.Tensor) -> None:
if self.dtype in [torch.quint8, torch.quint4x2]:
self._packed_weight = torch.ops.quantized.embedding_bag_prepack(weight)
else:
raise NotImplementedError('Unsupported dtype for quantized embedding prepack! Supports quint8 and quint4x2.')
@torch.jit.export
def _weight(self):
if self.dtype in [torch.quint8, torch.quint4x2]:
return torch.ops.quantized.embedding_bag_unpack(self._packed_weight)
else:
raise NotImplementedError('Unsupported dtype for quantized embedding unpack! Supports quint8 and quint4x2.')
def forward(self, x):
return x
# Version 1
# self
# |--- _packed_weight : Tensor representing weight of EmbeddingPackedParamsBase
# |--- dtype : torch.dtype
def _save_to_state_dict(self, destination, prefix, keep_vars):
super(EmbeddingPackedParams, self)._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + 'dtype'] = self.dtype
destination[prefix + '_packed_weight'] = self._weight()
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
self.dtype = state_dict[prefix + 'dtype']
state_dict.pop(prefix + 'dtype')
weight = state_dict[prefix + '_packed_weight']
state_dict.pop(prefix + '_packed_weight')
self.set_weight(weight)
super(EmbeddingPackedParams, self)._load_from_state_dict(state_dict, prefix, local_metadata, False,
missing_keys, unexpected_keys, error_msgs)
def __repr__(self):
return self._weight().__repr__()
class Embedding(torch.nn.Module):
r"""
A quantized Embedding module with quantized packed weights as inputs.
We adopt the same interface as `torch.nn.Embedding`, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.Embedding for documentation.
Similar to :class:`~torch.nn.Embedding`, attributes will be randomly
initialized at module creation time and will be overwritten later
Attributes:
weight (Tensor): the non-learnable quantized weights of the module of
shape :math:`(\text{num\_embeddings}, \text{embedding\_dim})`.
Examples::
>>> m = nn.quantized.Embedding(num_embeddings=10, embedding_dim=12)
>>> indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8])
>>> output = m(indices)
>>> print(output.size())
torch.Size([9, 12])
"""
_version = 1
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
sparse: bool = False, _weight: Optional[Tensor] = None, dtype=torch.quint8) -> None:
super(Embedding, self).__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.dtype = dtype
if _weight is None:
scales = torch.ones(num_embeddings, dtype=torch.float)
zero_points = torch.zeros(num_embeddings, dtype=torch.float)
qweight = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim],
scales=scales, zero_points=zero_points,
axis=0, dtype=torch.quint8)
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim], \
'Shape of weight does not match num_embeddings and embedding_dim'
qweight = _weight
self._packed_params = EmbeddingPackedParams(num_embeddings, embedding_dim, dtype)
self._packed_params.set_weight(qweight)
def forward(self, indices: Tensor) -> Tensor:
if self.dtype == torch.quint4x2:
return torch.ops.quantized.embedding_4bit(self._packed_params._packed_weight, indices)
else:
return torch.ops.quantized.embedding_byte(self._packed_params._packed_weight, indices)
def _get_name(self):
return 'QuantizedEmbedding'
def __repr__(self):
return hide_packed_params_repr(self, EmbeddingPackedParams)
def extra_repr(self):
extra_repr_str = 'num_embeddings={}, embedding_dim={}, dtype={}, qscheme={}'.format(
self.num_embeddings, self.embedding_dim, self._packed_params.dtype, self.weight().qscheme()
)
return extra_repr_str
def set_weight(self, w: torch.Tensor) -> None:
self._packed_params.set_weight(w)
def weight(self):
return self._packed_params._weight()
@classmethod
def from_float(cls, mod):
r"""Create a quantized embedding module from a float module
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by user
"""
if hasattr(mod, 'weight_fake_quant'):
assert type(mod) == nn.qat.Embedding, 'nnq.' + cls.__name__ + '.from_float ' + \
'with fake quant only works for ' + nn.qat.Embedding.__name__
weight_observer = mod.weight_fake_quant
activation_post_process = mod.activation_post_process
else:
assert type(mod) == nn.Embedding, 'nnq.' + cls.__name__ + '.from_float only works for ' + \
nn.Embedding.__name__
assert hasattr(mod, 'qconfig'), 'Embedding input float module must have qconfig defined'
from torch.ao.quantization import float_qparams_weight_only_qconfig
if mod.qconfig is not None and mod.qconfig.weight is not None: # type: ignore[union-attr]
weight_observer = mod.qconfig.weight() # type: ignore[union-attr, operator]
else:
weight_observer = float_qparams_weight_only_qconfig.weight()
dtype = weight_observer.dtype
is_float_qparams_qconfig = weight_observer.qscheme == torch.per_channel_affine_float_qparams
assert is_float_qparams_qconfig, \
'Embedding quantization is only supported with float_qparams_weight_only_qconfig.'
assert dtype == torch.quint8 or dtype == torch.quint4x2, \
            f'The only supported dtypes for nnq.Embedding are torch.quint8 and torch.quint4x2, got {dtype}'
# Run the observer to calculate qparams.
weight_observer(mod.weight)
qweight = _quantize_weight(mod.weight.float(), weight_observer)
# Create quantized Embedding module and pass in the quantized weight
qembedding = Embedding(mod.num_embeddings, mod.embedding_dim)
qembedding.set_weight(qweight)
return qembedding
@classmethod
def from_reference(cls, ref_embedding):
qembedding = cls(
ref_embedding.num_embeddings,
ref_embedding.embedding_dim,
ref_embedding.padding_idx,
ref_embedding.max_norm,
ref_embedding.norm_type,
ref_embedding.scale_grad_by_freq,
ref_embedding.sparse,
ref_embedding.get_quantized_weight(),
ref_embedding.weight_dtype,
)
return qembedding
class EmbeddingBag(Embedding):
r"""
A quantized EmbeddingBag module with quantized packed weights as inputs.
We adopt the same interface as `torch.nn.EmbeddingBag`, please see
https://pytorch.org/docs/stable/nn.html#torch.nn.EmbeddingBag for documentation.
Similar to :class:`~torch.nn.EmbeddingBag`, attributes will be randomly
initialized at module creation time and will be overwritten later
Attributes:
weight (Tensor): the non-learnable quantized weights of the module of
shape :math:`(\text{num\_embeddings}, \text{embedding\_dim})`.
Examples::
>>> m = nn.quantized.EmbeddingBag(num_embeddings=10, embedding_dim=12, include_last_offset=True, mode='sum')
>>> indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8, 3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3])
>>> offsets = torch.tensor([0, 19, 20, 28, 28, 32])
>>> output = m(indices, offsets)
>>> print(output.size())
torch.Size([5, 12])
"""
_version = 1
def __init__(self, num_embeddings: int, embedding_dim: int,
max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
mode: str = 'sum', sparse: bool = False, _weight: Optional[Tensor] = None,
include_last_offset: bool = False, dtype=torch.quint8) -> None:
super(EmbeddingBag, self).__init__(num_embeddings, embedding_dim, _weight=_weight, dtype=dtype)
self.mode = mode
self.pruned_weights = False
self.include_last_offset = include_last_offset
self.dtype = dtype
def forward(self, indices: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None,
compressed_indices_mapping: Optional[Tensor] = None) -> Tensor:
if self.dtype == torch.quint4x2:
return torch.ops.quantized.embedding_bag_4bit(self._packed_params._packed_weight, indices, offsets, False, 0,
self.pruned_weights, per_sample_weights, compressed_indices_mapping,
self.include_last_offset)
else:
return torch.ops.quantized.embedding_bag_byte(self._packed_params._packed_weight, indices, offsets, False, 0,
self.pruned_weights, per_sample_weights, compressed_indices_mapping,
self.include_last_offset)
def _get_name(self):
return 'QuantizedEmbeddingBag'
@classmethod
def from_float(cls, mod):
r"""Create a quantized embedding_bag module from a float module
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by user
"""
if hasattr(mod, 'weight_fake_quant'):
weight_observer = mod.weight_fake_quant
else:
assert type(mod) == nn.EmbeddingBag, 'nnq.' + cls.__name__ + '.from_float only works for ' + \
nn.EmbeddingBag.__name__
assert hasattr(mod, 'qconfig'), 'EmbeddingBag input float module must have qconfig defined'
from torch.ao.quantization.qconfig import float_qparams_weight_only_qconfig
if mod.qconfig is not None and mod.qconfig.weight is not None: # type: ignore[union-attr]
weight_observer = mod.qconfig.weight() # type: ignore[union-attr, operator]
else:
weight_observer = float_qparams_weight_only_qconfig.weight()
dtype = weight_observer.dtype
is_float_qparams_qconfig = weight_observer.qscheme == torch.per_channel_affine_float_qparams
assert is_float_qparams_qconfig, \
'EmbeddingBag quantization is only supported with float_qparams_weight_only_qconfig.'
assert dtype == torch.quint8 or dtype == torch.quint4x2, \
            f'The only supported dtypes for nnq.EmbeddingBag are torch.quint8 and torch.quint4x2, got {dtype}'
# Run the observer to calculate qparams.
weight_observer(mod.weight)
qweight = _quantize_weight(mod.weight.float(), weight_observer)
# Create quantized EmbeddingBag module and pass in the quantized weight
qembedding_bag = EmbeddingBag(mod.num_embeddings, mod.embedding_dim, dtype=dtype)
qembedding_bag.set_weight(qweight)
return qembedding_bag
@classmethod
def from_reference(cls, ref_embedding_bag):
qembedding_bag = cls(
ref_embedding_bag.num_embeddings,
ref_embedding_bag.embedding_dim,
ref_embedding_bag.max_norm,
ref_embedding_bag.norm_type,
ref_embedding_bag.scale_grad_by_freq,
ref_embedding_bag.mode,
ref_embedding_bag.sparse,
ref_embedding_bag.get_quantized_weight(),
ref_embedding_bag.include_last_offset,
ref_embedding_bag.weight_dtype,
)
return qembedding_bag
|
pytorch-master
|
torch/nn/quantized/modules/embedding_ops.py
|
# This is for historical pickle deserialization; it is not used otherwise.
def _get_thnn_function_backend():
pass
|
pytorch-master
|
torch/nn/backends/thnn.py
|
pytorch-master
|
torch/nn/backends/__init__.py
|
|
"""
Spectral Normalization from https://arxiv.org/abs/1802.05957
"""
import torch
from torch.nn.functional import normalize
from typing import Any, Optional, TypeVar
from ..modules import Module
__all__ = ['SpectralNorm', 'SpectralNormLoadStateDictPreHook', 'SpectralNormStateDictHook',
'spectral_norm', 'remove_spectral_norm']
class SpectralNorm:
# Invariant before and after each forward call:
# u = normalize(W @ v)
# NB: At initialization, this invariant is not enforced
_version: int = 1
# At version 1:
# made `W` not a buffer,
# added `v` as a buffer, and
# made eval mode use `W = u @ W_orig @ v` rather than the stored `W`.
name: str
dim: int
n_power_iterations: int
eps: float
def __init__(self, name: str = 'weight', n_power_iterations: int = 1, dim: int = 0, eps: float = 1e-12) -> None:
self.name = name
self.dim = dim
if n_power_iterations <= 0:
raise ValueError('Expected n_power_iterations to be positive, but '
'got n_power_iterations={}'.format(n_power_iterations))
self.n_power_iterations = n_power_iterations
self.eps = eps
def reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor:
weight_mat = weight
if self.dim != 0:
# permute dim to front
weight_mat = weight_mat.permute(self.dim,
*[d for d in range(weight_mat.dim()) if d != self.dim])
height = weight_mat.size(0)
return weight_mat.reshape(height, -1)
def compute_weight(self, module: Module, do_power_iteration: bool) -> torch.Tensor:
# NB: If `do_power_iteration` is set, the `u` and `v` vectors are
# updated in power iteration **in-place**. This is very important
# because in `DataParallel` forward, the vectors (being buffers) are
# broadcast from the parallelized module to each module replica,
# which is a new module object created on the fly. And each replica
# runs its own spectral norm power iteration. So simply assigning
# the updated vectors to the module this function runs on will cause
# the update to be lost forever. And the next time the parallelized
# module is replicated, the same randomly initialized vectors are
# broadcast and used!
#
# Therefore, to make the change propagate back, we rely on two
# important behaviors (also enforced via tests):
# 1. `DataParallel` doesn't clone storage if the broadcast tensor
# is already on correct device; and it makes sure that the
# parallelized module is already on `device[0]`.
# 2. If the out tensor in `out=` kwarg has correct shape, it will
# just fill in the values.
# Therefore, since the same power iteration is performed on all
# devices, simply updating the tensors in-place will make sure that
# the module replica on `device[0]` will update the _u vector on the
        # parallelized module (by shared storage).
#
# However, after we update `u` and `v` in-place, we need to **clone**
# them before using them to normalize the weight. This is to support
# backproping through two forward passes, e.g., the common pattern in
        # GAN training: loss = D(real) - D(fake). Otherwise, the engine will
# complain that variables needed to do backward for the first forward
# (i.e., the `u` and `v` vectors) are changed in the second forward.
weight = getattr(module, self.name + '_orig')
u = getattr(module, self.name + '_u')
v = getattr(module, self.name + '_v')
weight_mat = self.reshape_weight_to_matrix(weight)
if do_power_iteration:
with torch.no_grad():
for _ in range(self.n_power_iterations):
                    # The spectral norm of the weight equals `u^T W v`, where `u` and `v`
# are the first left and right singular vectors.
# This power iteration produces approximations of `u` and `v`.
v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v)
u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u)
if self.n_power_iterations > 0:
# See above on why we need to clone
u = u.clone(memory_format=torch.contiguous_format)
v = v.clone(memory_format=torch.contiguous_format)
sigma = torch.dot(u, torch.mv(weight_mat, v))
weight = weight / sigma
return weight
def remove(self, module: Module) -> None:
with torch.no_grad():
weight = self.compute_weight(module, do_power_iteration=False)
delattr(module, self.name)
delattr(module, self.name + '_u')
delattr(module, self.name + '_v')
delattr(module, self.name + '_orig')
module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))
def __call__(self, module: Module, inputs: Any) -> None:
setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training))
def _solve_v_and_rescale(self, weight_mat, u, target_sigma):
        # Tries to return a vector `v` s.t. `u = normalize(W @ v)`
# (the invariant at top of this class) and `u @ W @ v = sigma`.
# This uses pinverse in case W^T W is not invertible.
v = torch.linalg.multi_dot([weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)]).squeeze(1)
return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v)))
@staticmethod
def apply(module: Module, name: str, n_power_iterations: int, dim: int, eps: float) -> 'SpectralNorm':
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNorm) and hook.name == name:
raise RuntimeError("Cannot register two spectral_norm hooks on "
"the same parameter {}".format(name))
fn = SpectralNorm(name, n_power_iterations, dim, eps)
weight = module._parameters[name]
if weight is None:
raise ValueError(f'`SpectralNorm` cannot be applied as parameter `{name}` is None')
if isinstance(weight, torch.nn.parameter.UninitializedParameter):
raise ValueError(
'The module passed to `SpectralNorm` can\'t have uninitialized parameters. '
'Make sure to run the dummy forward before applying spectral normalization')
with torch.no_grad():
weight_mat = fn.reshape_weight_to_matrix(weight)
h, w = weight_mat.size()
# randomly initialize `u` and `v`
u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps)
v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps)
delattr(module, fn.name)
module.register_parameter(fn.name + "_orig", weight)
# We still need to assign weight back as fn.name because all sorts of
# things may assume that it exists, e.g., when initializing weights.
# However, we can't directly assign as it could be an nn.Parameter and
# gets added as a parameter. Instead, we register weight.data as a plain
# attribute.
setattr(module, fn.name, weight.data)
module.register_buffer(fn.name + "_u", u)
module.register_buffer(fn.name + "_v", v)
module.register_forward_pre_hook(fn)
module._register_state_dict_hook(SpectralNormStateDictHook(fn))
module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn))
return fn
# This is a top level class because Py2 pickle doesn't like inner class nor an
# instancemethod.
class SpectralNormLoadStateDictPreHook:
# See docstring of SpectralNorm._version on the changes to spectral_norm.
def __init__(self, fn) -> None:
self.fn = fn
    # For state_dict with version None (assuming that it has gone through at
    # least one training forward), we have
#
# u = normalize(W_orig @ v)
# W = W_orig / sigma, where sigma = u @ W_orig @ v
#
# To compute `v`, we solve `W_orig @ x = u`, and let
# v = x / (u @ W_orig @ x) * (W / W_orig).
def __call__(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs) -> None:
fn = self.fn
version = local_metadata.get('spectral_norm', {}).get(fn.name + '.version', None)
if version is None or version < 1:
weight_key = prefix + fn.name
if version is None and all(weight_key + s in state_dict for s in ('_orig', '_u', '_v')) and \
weight_key not in state_dict:
# Detect if it is the updated state dict and just missing metadata.
# This could happen if the users are crafting a state dict themselves,
# so we just pretend that this is the newest.
return
has_missing_keys = False
for suffix in ('_orig', '', '_u'):
key = weight_key + suffix
if key not in state_dict:
has_missing_keys = True
if strict:
missing_keys.append(key)
if has_missing_keys:
return
with torch.no_grad():
weight_orig = state_dict[weight_key + '_orig']
weight = state_dict.pop(weight_key)
sigma = (weight_orig / weight).mean()
weight_mat = fn.reshape_weight_to_matrix(weight_orig)
u = state_dict[weight_key + '_u']
v = fn._solve_v_and_rescale(weight_mat, u, sigma)
state_dict[weight_key + '_v'] = v
# This is a top level class because Py2 pickle doesn't like inner class nor an
# instancemethod.
class SpectralNormStateDictHook:
# See docstring of SpectralNorm._version on the changes to spectral_norm.
def __init__(self, fn) -> None:
self.fn = fn
def __call__(self, module, state_dict, prefix, local_metadata) -> None:
if 'spectral_norm' not in local_metadata:
local_metadata['spectral_norm'] = {}
key = self.fn.name + '.version'
if key in local_metadata['spectral_norm']:
raise RuntimeError("Unexpected key in metadata['spectral_norm']: {}".format(key))
local_metadata['spectral_norm'][key] = self.fn._version
T_module = TypeVar('T_module', bound=Module)
def spectral_norm(module: T_module,
name: str = 'weight',
n_power_iterations: int = 1,
eps: float = 1e-12,
dim: Optional[int] = None) -> T_module:
r"""Applies spectral normalization to a parameter in the given module.
.. math::
\mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})},
\sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}
Spectral normalization stabilizes the training of discriminators (critics)
in Generative Adversarial Networks (GANs) by rescaling the weight tensor
with spectral norm :math:`\sigma` of the weight matrix calculated using
    the power iteration method. If the dimension of the weight tensor is greater
    than 2, it is reshaped to 2D in the power iteration method to get the
    spectral norm. This is implemented via a hook that calculates the spectral
    norm and rescales the weight before every :meth:`~Module.forward` call.
See `Spectral Normalization for Generative Adversarial Networks`_ .
.. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
n_power_iterations (int, optional): number of power iterations to
calculate spectral norm
eps (float, optional): epsilon for numerical stability in
calculating norms
dim (int, optional): dimension corresponding to number of outputs,
the default is ``0``, except for modules that are instances of
ConvTranspose{1,2,3}d, when it is ``1``
Returns:
The original module with the spectral norm hook
.. note::
This function has been reimplemented as
:func:`torch.nn.utils.parametrizations.spectral_norm` using the new
parametrization functionality in
:func:`torch.nn.utils.parametrize.register_parametrization`. Please use
the newer version. This function will be deprecated in a future version
of PyTorch.
Example::
>>> m = spectral_norm(nn.Linear(20, 40))
>>> m
Linear(in_features=20, out_features=40, bias=True)
>>> m.weight_u.size()
torch.Size([40])
"""
if dim is None:
if isinstance(module, (torch.nn.ConvTranspose1d,
torch.nn.ConvTranspose2d,
torch.nn.ConvTranspose3d)):
dim = 1
else:
dim = 0
SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
return module
def remove_spectral_norm(module: T_module, name: str = 'weight') -> T_module:
r"""Removes the spectral normalization reparameterization from a module.
Args:
module (Module): containing module
name (str, optional): name of weight parameter
Example:
>>> m = spectral_norm(nn.Linear(40, 10))
>>> remove_spectral_norm(m)
"""
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNorm) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
break
else:
raise ValueError("spectral_norm of '{}' not found in {}".format(
name, module))
for k, hook in module._state_dict_hooks.items():
if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name:
del module._state_dict_hooks[k]
break
for k, hook in module._load_state_dict_pre_hooks.items():
if isinstance(hook, SpectralNormLoadStateDictPreHook) and hook.fn.name == name:
del module._load_state_dict_pre_hooks[k]
break
return module
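
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream file). It shows that,
# after enough forward calls (each training-mode forward runs one power
# iteration), the largest singular value of the effective weight is close
# to 1. The layer sizes, batch size, and iteration count are arbitrary
# assumptions made for this sketch only.
if __name__ == "__main__":
    import torch.nn as nn

    m = spectral_norm(nn.Linear(20, 40), n_power_iterations=1)
    x = torch.randn(8, 20)
    for _ in range(50):
        m(x)  # each forward refines the power-iteration estimate of sigma
    with torch.no_grad():
        sigma = torch.linalg.svdvals(m.weight)[0]
    print(f"largest singular value of m.weight ~ {sigma.item():.4f}")  # ~1.0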
|
pytorch-master
|
torch/nn/utils/spectral_norm.py
|
import torch
from typing import Iterable, Optional
def parameters_to_vector(parameters: Iterable[torch.Tensor]) -> torch.Tensor:
r"""Convert parameters to one vector
Args:
parameters (Iterable[Tensor]): an iterator of Tensors that are the
parameters of a model.
Returns:
The parameters represented by a single vector
"""
# Flag for the device where the parameter is located
param_device = None
vec = []
for param in parameters:
        # Ensure the parameters are located on the same device
param_device = _check_param_device(param, param_device)
vec.append(param.view(-1))
return torch.cat(vec)
def vector_to_parameters(vec: torch.Tensor, parameters: Iterable[torch.Tensor]) -> None:
r"""Convert one vector to the parameters
Args:
vec (Tensor): a single vector represents the parameters of a model.
parameters (Iterable[Tensor]): an iterator of Tensors that are the
parameters of a model.
"""
# Ensure vec of type Tensor
if not isinstance(vec, torch.Tensor):
raise TypeError('expected torch.Tensor, but got: {}'
.format(torch.typename(vec)))
# Flag for the device where the parameter is located
param_device = None
# Pointer for slicing the vector for each parameter
pointer = 0
for param in parameters:
        # Ensure the parameters are located on the same device
param_device = _check_param_device(param, param_device)
# The length of the parameter
num_param = param.numel()
# Slice the vector, reshape it, and replace the old data of the parameter
param.data = vec[pointer:pointer + num_param].view_as(param).data
# Increment the pointer
pointer += num_param
def _check_param_device(param: torch.Tensor, old_param_device: Optional[int]) -> int:
r"""This helper function is to check if the parameters are located
in the same device. Currently, the conversion between model parameters
and single vector form is not supported for multiple allocations,
e.g. parameters in different GPUs, or mixture of CPU/GPU.
Args:
param ([Tensor]): a Tensor of a parameter of a model
old_param_device (int): the device where the first parameter of a
model is allocated.
Returns:
old_param_device (int): report device for the first time
"""
# Meet the first parameter
if old_param_device is None:
old_param_device = param.get_device() if param.is_cuda else -1
else:
warn = False
if param.is_cuda: # Check if in same GPU
warn = (param.get_device() != old_param_device)
else: # Check if in CPU
warn = (old_param_device != -1)
if warn:
raise TypeError('Found two parameters on different devices, '
'this is currently not supported.')
return old_param_device
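
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream file). It demonstrates
# the round trip between a model's parameters and a single flat vector. The
# small Linear layer is an arbitrary assumption for this sketch only.
if __name__ == "__main__":
    model = torch.nn.Linear(3, 2)
    vec = parameters_to_vector(model.parameters())
    assert vec.numel() == sum(p.numel() for p in model.parameters())  # 3*2 + 2 = 8
    # Writing a (possibly modified) vector back preserves the original layout.
    vector_to_parameters(vec * 2, model.parameters())
    assert torch.equal(parameters_to_vector(model.parameters()), vec * 2)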
|
pytorch-master
|
torch/nn/utils/convert_parameters.py
|
import contextlib
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple
import torch
from torch import Tensor
__all__ = ["functional_call"]
# We avoid typing module here because module attributes are declared as Union[Parameter, Tensor] by default
# and using other types causes mypy errors
def _change_class(module, params_and_buffers) -> None:
cls = module.__class__
attr_to_path : Dict[str, str] = module._attr_to_path
def _getattribute(self, name: str) -> Any:
if name in attr_to_path:
return params_and_buffers[attr_to_path[name]]
return cls.__getattribute__(self, name)
def _setattr(self, name: str, value: Any) -> None:
if name in attr_to_path:
params_and_buffers[attr_to_path[name]] = value
else:
return cls.__setattr__(self, name, value)
param_cls = type(
f"StatelessReplacer{cls.__name__}",
(cls,),
{
"__getattribute__": _getattribute,
"__setattr__": _setattr,
},
)
module.__class__ = param_cls
module._orig_class = cls
def _create_swap_params(params_and_buffers):
def _swap_parameters(module, tensor_name: str, full_path: str, tensor: Tensor) -> None:
# Changes the module class to get a new __getattr__ dunder method
# that looks for the reparametrized tensor
if hasattr(module, "_attr_to_path"):
module._attr_to_path[tensor_name] = full_path
else:
module._attr_to_path = {}
module._attr_to_path[tensor_name] = full_path
_change_class(module, params_and_buffers)
return _swap_parameters
def _remove_swap(module, name: str, full_path: str) -> None:
if hasattr(module, "_orig_class"):
module.__class__ = module._orig_class
delattr(module, "_orig_class")
delattr(module, "_attr_to_path")
@contextlib.contextmanager
def _reparametrize_module(
module: 'torch.nn.Module',
parameters_and_buffers: Dict[str, Tensor],
) -> Iterator[None]:
for name, tensor in parameters_and_buffers.items():
_apply_func_submodules(
_create_swap_params(parameters_and_buffers),
module, name.split("."), name, (tensor,))
try:
yield
finally:
for name in parameters_and_buffers:
_apply_func_submodules(
_remove_swap,
module, name.split("."), name, ())
def _apply_func_submodules(
func: Callable[..., None],
module: 'torch.nn.Module',
path: List[str],
full_path: str,
args: Tuple,
):
if len(path) == 1:
func(module, path[0], full_path, *args)
else:
_apply_func_submodules(func, getattr(module, path[0]), path[1:], full_path, args)
def functional_call(
module: 'torch.nn.Module',
parameters_and_buffers: Dict[str, Tensor],
args: Tuple,
    kwargs: Optional[Dict[str, Any]] = None,
):
r"""Performs a functional call on the module by replacing the module parameters
and buffers with the provided ones.
.. note:: If the module has active parametrizations, passing a value in the
:attr:`parameters_and_buffers` argument with the name set to the regular parameter
name will completely disable the parametrization.
If you want to apply the parametrization function to the value passed
please set the key as ``{submodule_name}.parametrizations.{parameter_name}.original``.
.. note:: If the module performs in-place operations on parameters/buffers, these will be reflected
in the `parameters_and_buffers` input.
Example::
>>> a = {'foo': torch.zeros(())}
>>> # xdoctest: +SKIP
>>> mod = Foo() # does self.foo = self.foo + 1
>>> print(mod.foo) # tensor(0.)
>>> functional_call(mod, a, torch.ones(()))
>>> print(mod.foo) # tensor(0.)
>>> print(a['foo']) # tensor(1.)
Args:
module (torch.nn.Module): the module to call
parameters_and_buffers (dict of str and Tensor): the parameters that will be used in
the module call.
args (tuple): arguments to be passed to the module call
kwargs (dict): keyword arguments to be passed to the module call
Returns:
Any: the result of calling ``module``.
"""
# TODO allow kwargs such as unsafe and others for parametrization
if (
torch.jit.is_tracing()
or torch.jit.is_scripting()
or isinstance(module, (
torch.jit.RecursiveScriptModule,
torch.jit.ScriptModule,
torch.jit.ScriptFunction)
)
):
raise RuntimeError("The stateless API can't be used with Jitted modules")
if kwargs is None:
kwargs = {}
with _reparametrize_module(module, parameters_and_buffers):
if isinstance(args, tuple):
out = module(*args, **kwargs)
else:
out = module(args, **kwargs)
return out
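
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream file). It calls a
# module with a substituted weight while leaving the module's own parameters
# untouched. The layer size, the identity weight, and the input shape are
# arbitrary assumptions for this sketch only.
if __name__ == "__main__":
    lin = torch.nn.Linear(3, 3, bias=False)
    x = torch.randn(2, 3)
    out = functional_call(lin, {"weight": torch.eye(3)}, (x,))
    assert torch.allclose(out, x)  # behaves as if the weight were the identity
    # lin.weight itself is left unchanged by the call.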
|
pytorch-master
|
torch/nn/utils/stateless.py
|
import torch
from torch.nn.modules.container import ModuleList, ModuleDict, Module
from torch.nn.parameter import Parameter
from torch import Tensor
import collections
import copyreg
from copy import deepcopy
from contextlib import contextmanager
from typing import Union, Optional, Dict, Tuple, Sequence
__all__ = ['cached', 'ParametrizationList', 'register_parametrization', 'is_parametrized', 'remove_parametrizations',
'type_before_parametrizations', 'transfer_parametrizations_and_params']
_cache_enabled = 0
_cache: Dict[Tuple[int, str], Optional[Tensor]] = {}
@contextmanager
def cached():
r"""Context manager that enables the caching system within parametrizations
registered with :func:`register_parametrization`.
The value of the parametrized objects is computed and cached the first time
they are required when this context manager is active. The cached values are
discarded when leaving the context manager.
This is useful when using a parametrized parameter more than once in the forward pass.
An example of this is when parametrizing the recurrent kernel of an RNN or when
sharing weights.
The simplest way to activate the cache is by wrapping the forward pass of the neural network
.. code-block:: python
import torch.nn.utils.parametrize as P
...
with P.cached():
output = model(inputs)
in training and evaluation. One may also wrap the parts of the modules that use
several times the parametrized tensors. For example, the loop of an RNN with a
parametrized recurrent kernel:
.. code-block:: python
with P.cached():
for x in xs:
out_rnn = self.rnn_cell(x, out_rnn)
"""
global _cache
global _cache_enabled
_cache_enabled += 1
try:
yield
finally:
_cache_enabled -= 1
if not _cache_enabled:
_cache = {}
def _register_parameter_or_buffer(module, name, X):
if isinstance(X, Parameter):
module.register_parameter(name, X)
else:
module.register_buffer(name, X)
class ParametrizationList(ModuleList):
r"""A sequential container that holds and manages the ``original`` or ``original0``, ``original1``, ...
parameters or buffers of a parametrized :class:`torch.nn.Module`.
It is the type of ``module.parametrizations[tensor_name]`` when ``module[tensor_name]``
has been parametrized with :func:`register_parametrization`.
    If the first registered parametrization has a ``right_inverse`` that returns one tensor or
does not have a ``right_inverse`` (in which case we assume that ``right_inverse`` is the identity),
it will hold the tensor under the name ``original``.
If it has a ``right_inverse`` that returns more than one tensor, these will be registered as
``original0``, ``original1``, ...
.. warning::
This class is used internally by :func:`register_parametrization`. It is documented
here for completeness. It shall not be instantiated by the user.
Args:
modules (sequence): sequence of modules representing the parametrizations
original (Parameter or Tensor): parameter or buffer that is parametrized
unsafe (bool): a boolean flag that denotes whether the parametrization
may change the dtype and shape of the tensor. Default: `False`
Warning: the parametrization is not checked for consistency upon registration.
Enable this flag at your own risk.
"""
original: Tensor
unsafe: bool
def __init__(
self, modules: Sequence[Module], original: Union[Tensor, Parameter], unsafe: bool = False
) -> None:
# We require this because we need to treat differently the first parametrization
# This should never throw, unless this class is used from the outside
if len(modules) == 0:
raise ValueError("ParametrizationList requires one or more modules.")
super().__init__(modules)
self.unsafe = unsafe
# In plain words:
# module.weight must keep its dtype and shape.
# Furthermore, if there is no right_inverse or the right_inverse returns a tensor,
# this should be of the same dtype as the original tensor
#
# We check that the following invariants hold:
# X = module.weight
# Y = param.right_inverse(X)
# assert isinstance(Y, Tensor) or
# (isinstance(Y, collections.abc.Sequence) and all(isinstance(t, Tensor) for t in Y))
        # Z = param(Y) if isinstance(Y, Tensor) else param(*Y)
# # Consistency checks
# assert X.dtype == Z.dtype and X.shape == Z.shape
        # # If it has one input, this allows us to use set_ to move data to/from
        # # the original tensor without changing its id (which is what the
        # # optimiser uses to track parameters)
# if isinstance(Y, Tensor)
# assert X.dtype == Y.dtype
# Below we use original = X, new = Y
original_shape = original.shape
original_dtype = original.dtype
# Compute new
with torch.no_grad():
new = original
for module in reversed(self): # type: ignore[call-overload]
if hasattr(module, "right_inverse"):
try:
new = module.right_inverse(new)
except NotImplementedError:
pass
# else, or if it throws, we assume that right_inverse is the identity
if not isinstance(new, Tensor) and not isinstance(new, collections.abc.Sequence):
raise ValueError("'right_inverse' must return a Tensor or a Sequence of tensors (list, tuple...). "
f"Got {type(new).__name__}")
# Set the number of original tensors
self.is_tensor = isinstance(new, Tensor)
self.ntensors = 1 if self.is_tensor else len(new)
# Register the tensor(s)
if self.is_tensor:
if original.dtype != new.dtype:
raise ValueError(
"When `right_inverse` outputs one tensor, it may not change the dtype.\n"
f"original.dtype: {original.dtype}\n"
f"right_inverse(original).dtype: {new.dtype}"
)
            # Update `original` in place with `new` so that the user does not need to
            # re-register the parameter manually in the optimiser
with torch.no_grad():
original.set_(new) # type: ignore[call-overload]
_register_parameter_or_buffer(self, "original", original)
else:
for i, originali in enumerate(new):
if not isinstance(originali, Tensor):
raise ValueError("'right_inverse' must return a Tensor or a Sequence of tensors "
"(list, tuple...). "
f"Got element {i} of the sequence with type {type(originali).__name__}.")
# If the original tensor was a Parameter that required grad, we expect the user to
# add the new parameters to the optimizer after registering the parametrization
# (this is documented)
if isinstance(original, Parameter):
originali = Parameter(originali)
originali.requires_grad_(original.requires_grad)
_register_parameter_or_buffer(self, f"original{i}", originali)
if not self.unsafe:
# Consistency checks:
# Since f : A -> B, right_inverse : B -> A, Z and original should live in B
# Z = forward(right_inverse(original))
Z = self()
if not isinstance(Z, Tensor):
raise ValueError(
f"A parametrization must return a tensor. Got {type(Z).__name__}."
)
if Z.dtype != original_dtype:
raise ValueError(
"Registering a parametrization may not change the dtype of the tensor, unless `unsafe` flag is enabled.\n"
f"unparametrized dtype: {original_dtype}\n"
f"parametrized dtype: {Z.dtype}"
)
if Z.shape != original_shape:
raise ValueError(
"Registering a parametrization may not change the shape of the tensor, unless `unsafe` flag is enabled.\n"
f"unparametrized shape: {original_shape}\n"
f"parametrized shape: {Z.shape}"
)
def right_inverse(self, value: Tensor) -> None:
r"""Calls the methods ``right_inverse`` (see :func:`register_parametrization`)
of the parametrizations in the inverse order they were registered in.
Then, it stores the result in ``self.original`` if ``right_inverse`` outputs one tensor
or in ``self.original0``, ``self.original1``, ... if it outputs several.
Args:
value (Tensor): Value to which initialize the module
"""
# All the exceptions in this function should almost never throw.
# They could throw if, for example, right_inverse function returns a different
# dtype when given a different input, which should most likely be caused by a
# bug in the user's code
with torch.no_grad():
# See https://github.com/pytorch/pytorch/issues/53103
for module in reversed(self): # type: ignore[call-overload]
if hasattr(module, "right_inverse"):
value = module.right_inverse(value)
else:
raise RuntimeError(f"parametrization {type(module).__name__} does not implement "
"right_inverse.")
if self.is_tensor:
# These exceptions should only throw when a right_inverse function does not
# return the same dtype for every input, which should most likely be caused by a bug
if not isinstance(value, Tensor):
raise ValueError(
f"`right_inverse` should return a tensor. Got {type(value).__name__}"
)
if value.dtype != self.original.dtype:
raise ValueError(
f"The tensor returned by `right_inverse` has dtype {value.dtype} "
f"while `original` has dtype {self.original.dtype}"
)
# We know that the result is going to have the same dtype
self.original.set_(value) # type: ignore[call-overload]
else:
if not isinstance(value, collections.abc.Sequence):
raise ValueError(
"'right_inverse' must return a sequence of tensors. "
f"Got {type(value).__name__}."
)
if len(value) != self.ntensors:
raise ValueError(
"'right_inverse' must return a sequence of tensors of length "
f"{self.ntensors}. Got a sequence of lenght {len(value)}."
)
for i, tensor in enumerate(value):
original_i = getattr(self, f"original{i}")
if not isinstance(tensor, Tensor):
raise ValueError(
f"`right_inverse` must return a sequence of tensors. "
f"Got element {i} of type {type(tensor).__name__}"
)
if original_i.dtype != tensor.dtype:
raise ValueError(
f"Tensor {i} returned by `right_inverse` has dtype {tensor.dtype} "
f"while `original{i}` has dtype {original_i.dtype}"
)
original_i.set_(tensor)
def forward(self) -> Tensor:
if torch.jit.is_scripting():
raise RuntimeError('Parametrization is not working with scripting.')
# Unpack the originals for the first parametrization
if self.is_tensor:
x = self[0](self.original)
else:
originals = (getattr(self, f"original{i}") for i in range(self.ntensors))
x = self[0](*originals)
# It's not possible to call self[1:] here, so we have to be a bit more cryptic
# Also we want to skip all non-integer keys
curr_idx = 1
while hasattr(self, str(curr_idx)):
x = self[curr_idx](x)
curr_idx += 1
return x
def _inject_new_class(module: Module) -> None:
r"""Sets up a module to be parametrized.
This works by substituting the class of the module by a class
that extends it to be able to inject a property
Args:
module (nn.Module): module into which to inject the property
"""
cls = module.__class__
def default_deepcopy(self, memo):
# Just emulate a standard deepcopy procedure when __deepcopy__ doesn't exist in the current class.
obj = memo.get(id(self), None)
if obj is not None:
return obj
replica = self.__new__(self.__class__)
memo[id(self)] = replica
replica.__dict__ = deepcopy(self.__dict__, memo)
# Also save all slots if they exist.
slots_to_save = copyreg._slotnames(self.__class__) # type: ignore[attr-defined]
for slot in slots_to_save:
if hasattr(self, slot):
setattr(replica, slot, deepcopy(getattr(self, slot), memo))
return replica
def getstate(self):
raise RuntimeError(
"Serialization of parametrized modules is only "
"supported through state_dict(). See:\n"
"https://pytorch.org/tutorials/beginner/saving_loading_models.html"
"#saving-loading-a-general-checkpoint-for-inference-and-or-resuming-training"
)
dct = {"__getstate__": getstate}
# We don't allow serialization of parametrized modules but should still allow deepcopying.
# Default 'deepcopy' function invokes __deepcopy__ method instead of __getstate__ when it exists.
if not hasattr(cls, "__deepcopy__"):
dct["__deepcopy__"] = default_deepcopy # type: ignore[assignment]
param_cls = type(
f"Parametrized{cls.__name__}",
(cls,),
dct,
)
module.__class__ = param_cls
def _inject_property(module: Module, tensor_name: str) -> None:
r"""Injects a property into module[tensor_name].
It assumes that the class in the module has already been modified from its
original one using _inject_new_class and that the tensor under :attr:`tensor_name`
has already been moved out
Args:
module (nn.Module): module into which to inject the property
        tensor_name (str): name of the property to create
"""
# We check the precondition.
# This should never fire if register_parametrization is correctly implemented
assert not hasattr(module, tensor_name)
@torch.jit.unused
def get_cached_parametrization(parametrization) -> Tensor:
global _cache
key = (id(module), tensor_name)
tensor = _cache.get(key)
if tensor is None:
tensor = parametrization()
_cache[key] = tensor
return tensor
def get_parametrized(self) -> Tensor:
if torch.jit.is_scripting():
raise RuntimeError('Parametrization is not working with scripting.')
parametrization = self.parametrizations[tensor_name]
if _cache_enabled:
if torch.jit.is_scripting():
# Scripting
raise RuntimeError('Caching is not implemented for scripting. '
'Either disable caching or avoid scripting.')
elif torch._C._get_tracing_state() is not None:
# Tracing
raise RuntimeError('Cannot trace a model while caching parametrizations.')
else:
return get_cached_parametrization(parametrization)
else:
# If caching is not active, this function just evaluates the parametrization
return parametrization()
def set_original(self, value: Tensor) -> None:
if torch.jit.is_scripting():
raise RuntimeError('Parametrization is not working with scripting.')
self.parametrizations[tensor_name].right_inverse(value)
setattr(module.__class__, tensor_name, property(get_parametrized, set_original))
def register_parametrization(
module: Module, tensor_name: str, parametrization: Module, *, unsafe: bool = False,
) -> Module:
r"""Adds a parametrization to a tensor in a module.
Assume that ``tensor_name="weight"`` for simplicity. When accessing ``module.weight``,
the module will return the parametrized version ``parametrization(module.weight)``.
If the original tensor requires a gradient, the backward pass will differentiate
through :attr:`parametrization`, and the optimizer will update the tensor accordingly.
The first time that a module registers a parametrization, this function will add an attribute
``parametrizations`` to the module of type :class:`~ParametrizationList`.
The list of parametrizations on the tensor ``weight`` will be accessible under
``module.parametrizations.weight``.
The original tensor will be accessible under
``module.parametrizations.weight.original``.
Parametrizations may be concatenated by registering several parametrizations
on the same attribute.
The training mode of a registered parametrization is updated on registration
to match the training mode of the host module
Parametrized parameters and buffers have an inbuilt caching system that can be activated
using the context manager :func:`cached`.
A :attr:`parametrization` may optionally implement a method with signature
.. code-block:: python
def right_inverse(self, X: Tensor) -> Union[Tensor, Sequence[Tensor]]
This method is called on the unparametrized tensor when the first parametrization
is registered to compute the initial value of the original tensor.
If this method is not implemented, the original tensor will be just the unparametrized tensor.
If all the parametrizations registered on a tensor implement `right_inverse` it is possible
to initialize a parametrized tensor by assigning to it, as shown in the example below.
It is possible for the first parametrization to depend on several inputs.
This may be implemented returning a tuple of tensors from ``right_inverse``
(see the example implementation of a ``RankOne`` parametrization below).
In this case, the unconstrained tensors are also located under ``module.parametrizations.weight``
with names ``original0``, ``original1``,...
.. note::
If unsafe=False (default) both the forward and right_inverse methods will be called
once to perform a number of consistency checks.
If unsafe=True, then right_inverse will be called if the tensor is not parametrized,
and nothing will be called otherwise.
.. note::
In most situations, ``right_inverse`` will be a function such that
``forward(right_inverse(X)) == X`` (see
`right inverse <https://en.wikipedia.org/wiki/Inverse_function#Right_inverses>`_).
Sometimes, when the parametrization is not surjective, it may be reasonable
to relax this.
.. warning::
If a parametrization depends on several inputs, :func:`~register_parametrization`
will register a number of new parameters. If such parametrization is registered
after the optimizer is created, these new parameters will need to be added manually
        to the optimizer. See :meth:`torch.optim.Optimizer.add_param_group`.
Args:
module (nn.Module): module on which to register the parametrization
tensor_name (str): name of the parameter or buffer on which to register
the parametrization
parametrization (nn.Module): the parametrization to register
Keyword args:
unsafe (bool): a boolean flag that denotes whether the parametrization
may change the dtype and shape of the tensor. Default: `False`
Warning: the parametrization is not checked for consistency upon registration.
Enable this flag at your own risk.
Raises:
ValueError: if the module does not have a parameter or a buffer named :attr:`tensor_name`
Examples:
>>> # xdoctest: +REQUIRES(--lapack)
>>> import torch
>>> import torch.nn as nn
>>> import torch.nn.utils.parametrize as P
>>>
>>> class Symmetric(nn.Module):
>>> def forward(self, X):
>>> return X.triu() + X.triu(1).T # Return a symmetric matrix
>>>
>>> def right_inverse(self, A):
>>> return A.triu()
>>>
>>> m = nn.Linear(5, 5)
>>> P.register_parametrization(m, "weight", Symmetric())
>>> print(torch.allclose(m.weight, m.weight.T)) # m.weight is now symmetric
True
>>> A = torch.rand(5, 5)
>>> A = A + A.T # A is now symmetric
>>> m.weight = A # Initialize the weight to be the symmetric matrix A
>>> print(torch.allclose(m.weight, A))
True
>>> class RankOne(nn.Module):
>>> def forward(self, x, y):
>>> # Form a rank 1 matrix multiplying two vectors
>>> return x.unsqueeze(-1) @ y.unsqueeze(-2)
>>>
>>> def right_inverse(self, Z):
>>> # Project Z onto the rank 1 matrices
>>> U, S, Vh = torch.linalg.svd(Z, full_matrices=False)
>>> # Return rescaled singular vectors
>>> s0_sqrt = S[0].sqrt().unsqueeze(-1)
>>> return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt
>>>
>>> linear_rank_one = P.register_parametrization(nn.Linear(4, 4), "weight", RankOne())
>>> print(torch.linalg.matrix_rank(linear_rank_one.weight).item())
1
"""
parametrization.train(module.training)
if is_parametrized(module, tensor_name):
# Correctness checks.
# If A is the space of tensors with shape and dtype equal to module.weight
# we check that parametrization.forward and parametrization.right_inverse are
# functions from A to A
if not unsafe:
Y = getattr(module, tensor_name)
X = parametrization(Y)
if not isinstance(X, Tensor):
raise ValueError(
f"A parametrization must return a tensor. Got {type(X).__name__}."
)
if X.dtype != Y.dtype:
raise ValueError(
"Registering a parametrization may not change the dtype of the tensor, unless the `unsafe` flag is enabled.\n"
f"module.{tensor_name}.dtype: {Y.dtype}\n"
f"parametrization(module.{tensor_name}).dtype: {X.dtype}"
)
if X.shape != Y.shape:
raise ValueError(
"Registering a parametrization may not change the shape of the tensor, unless the `unsafe` flag is enabled.\n"
f"module.{tensor_name}.shape: {Y.shape}\n"
f"parametrization(module.{tensor_name}).shape: {X.shape}"
)
if hasattr(parametrization, "right_inverse"):
try:
Z = parametrization.right_inverse(X) # type: ignore[operator]
except NotImplementedError:
pass
else:
if not isinstance(Z, Tensor):
raise ValueError(
f"parametrization.right_inverse must return a tensor. Got: {type(Z).__name__}"
)
if Z.dtype != Y.dtype:
raise ValueError(
"The tensor returned by parametrization.right_inverse must have the same dtype "
f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n"
f"module.{tensor_name}.dtype: {Y.dtype}\n"
f"returned dtype: {Z.dtype}"
)
if Z.shape != Y.shape:
raise ValueError(
"The tensor returned by parametrization.right_inverse must have the same shape "
f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n"
f"module.{tensor_name}.shape: {Y.shape}\n"
f"returned shape: {Z.shape}"
)
# else right_inverse is assumed to be the identity
# add the new parametrization to the parametrization list
assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy
module.parametrizations[tensor_name].append(parametrization)
# If unsafe was True in previous parametrization, keep it enabled
module.parametrizations[tensor_name].unsafe |= unsafe # type: ignore[index, union-attr]
elif tensor_name in module._buffers or tensor_name in module._parameters:
# Set the parametrization mechanism
# Fetch the original buffer or parameter
original = getattr(module, tensor_name)
# We create this early to check for possible errors
parametrizations = ParametrizationList([parametrization], original, unsafe=unsafe)
# Delete the previous parameter or buffer
delattr(module, tensor_name)
# If this is the first parametrization registered on the module,
# we prepare the module to inject the property
if not is_parametrized(module):
# Change the class
_inject_new_class(module)
# Inject a ``ModuleDict`` into the instance under module.parametrizations
module.parametrizations = ModuleDict()
# Add a property into the class
_inject_property(module, tensor_name)
# Add a ParametrizationList
assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy
module.parametrizations[tensor_name] = parametrizations
else:
raise ValueError(
f"Module '{module}' does not have a parameter, a buffer, or a "
f"parametrized element with name '{tensor_name}'"
)
return module
def is_parametrized(module: Module, tensor_name: Optional[str] = None) -> bool:
r"""Returns ``True`` if module has an active parametrization.
If the argument :attr:`tensor_name` is specified, returns ``True`` if
``module[tensor_name]`` is parametrized.
Args:
module (nn.Module): module to query
        tensor_name (str, optional): attribute in the module to query
Default: ``None``
"""
parametrizations = getattr(module, "parametrizations", None)
if parametrizations is None or not isinstance(parametrizations, ModuleDict):
return False
if tensor_name is None:
# Check that there is at least one parametrized buffer or Parameter
return len(parametrizations) > 0
else:
return tensor_name in parametrizations
def remove_parametrizations(
module: Module, tensor_name: str, leave_parametrized: bool = True
) -> Module:
r"""Removes the parametrizations on a tensor in a module.
- If ``leave_parametrized=True``, ``module[tensor_name]`` will be set to
its current output. In this case, the parametrization shall not change the ``dtype``
of the tensor.
- If ``leave_parametrized=False``, ``module[tensor_name]`` will be set to
      the unparametrized tensor in ``module.parametrizations[tensor_name].original``.
This is only possible when the parametrization depends on just one tensor.
Args:
module (nn.Module): module from which remove the parametrization
tensor_name (str): name of the parametrization to be removed
leave_parametrized (bool, optional): leave the attribute :attr:`tensor_name` parametrized.
Default: ``True``
Returns:
Module: module
Raises:
ValueError: if ``module[tensor_name]`` is not parametrized
ValueError: if ``leave_parametrized=False`` and the parametrization depends on several tensors
"""
if not is_parametrized(module, tensor_name):
raise ValueError(f"Module {module} does not have a parametrization on {tensor_name}")
# Fetch the original tensor
assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy
parametrizations = module.parametrizations[tensor_name]
if parametrizations.is_tensor:
original = parametrizations.original
if leave_parametrized:
with torch.no_grad():
t = getattr(module, tensor_name)
# We know they have the same dtype because we have checked this when registering the
# parametrizations. As such, we can use set_
            # We do this so that the parameter does not change its id()
# This way the user does not need to update the optimizer
with torch.no_grad():
if type(original) is torch.Tensor:
original.set_(t)
else:
try:
original.set_(t)
except RuntimeError as e:
# TODO: Fix this for tensor subclasses that are parameters:
# RuntimeError: set_storage is not allowed on a Tensor created from .data or .detach().
raise RuntimeError("Calling remove_parametrizations() with leave_parametrized=True "
"for a parameter that is an instance of a tensor subclass requires "
"set_() to be implemented correctly for the tensor subclass. Either "
"set leave_parametrized=False or provide a working implementation for "
"set_() in the tensor subclass.")
else:
if leave_parametrized:
# We cannot use no_grad because we need to know whether one or more
# original tensors required grad
t = getattr(module, tensor_name)
# We'll have to trust the user to add it to the optimizer
original = Parameter(t) if t.requires_grad else t
else:
raise ValueError("Cannot leave unparametrized (`leave_parametrized=False`) a tensor "
"that is parametrized in terms of a sequence of tensors.")
# Delete the property that manages the parametrization
delattr(module.__class__, tensor_name)
# Delete the ParametrizationList
del module.parametrizations[tensor_name]
# Restore the parameter / buffer into the main class
_register_parameter_or_buffer(module, tensor_name, original)
# Roll back the parametrized class if no other buffer or parameter
# is currently parametrized in this class
if not is_parametrized(module):
delattr(module, "parametrizations")
# Restore class
orig_cls = module.__class__.__bases__[0]
module.__class__ = orig_cls
return module
def type_before_parametrizations(module: Module) -> type:
r"""Returns the module type before parametrizations were applied and if not,
then it returns the module type.
Args:
module (nn.Module): module to get type of
"""
if is_parametrized(module):
return module.__class__.__bases__[0]
else:
return type(module)
def transfer_parametrizations_and_params(
from_module: Module, to_module: Module, tensor_name: Optional[str] = None
) -> Module:
r"""Transfers parametrizations and the parameters they parametrize from from_module
to to_module. If tensor_name is specified, only transfers the specified parameter, otherwise
transfers all parametrized parameters. If those parameters do not exist in to_module, it will create them.
Does nothing if from_module is not parametrized.
Args:
from_module (nn.Module): module to transfer from
to_module (nn.Module): module to transfer to
tensor_name (str, optional): parameter to transfer
Returns:
Module: to_module
"""
if is_parametrized(from_module):
assert isinstance(from_module.parametrizations, ModuleDict) # for mypy
# get list of all params or the single param to transfer
parameters_to_transfer: Union[list, ModuleDict] = (
from_module.parametrizations if tensor_name is None else [tensor_name]
)
assert hasattr(parameters_to_transfer, "__iter__") # for mypy
for parameter_name in parameters_to_transfer:
            # initialize the to-be-transferred param in to_module if it doesn't exist already
if not hasattr(to_module, parameter_name):
setattr(
to_module,
parameter_name,
Parameter(getattr(from_module, parameter_name)),
)
            # apply the param's parametrizations to to_module
for param_func in from_module.parametrizations[parameter_name]:
register_parametrization(to_module, parameter_name, param_func)
assert isinstance(to_module.parametrizations, ModuleDict) # for mypy
# make values match, original values can be stored in either original or
# original0, original1..., need to check both cases
if hasattr(from_module.parametrizations[parameter_name], "original"):
to_module.parametrizations[parameter_name].original = \
from_module.parametrizations[parameter_name].original
else:
num = 0
orig_num = "original" + str(num)
# loop through each original# until all values have been set
while hasattr(from_module.parametrizations[parameter_name], orig_num):
setattr(
to_module.parametrizations[parameter_name],
orig_num,
getattr(from_module.parametrizations[parameter_name], orig_num),
)
num = num + 1
orig_num = "original" + str(num)
return to_module
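
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream file). `Skew` is a
# hypothetical parametrization written for this sketch, and the layer size is
# an arbitrary assumption. It shows registration, the caching context
# manager, and removal while keeping the parametrized value.
if __name__ == "__main__":
    import torch.nn as nn

    class Skew(nn.Module):
        def forward(self, X):
            return X - X.transpose(-1, -2)  # always skew-symmetric

        def right_inverse(self, A):
            return A.triu(1)  # one valid preimage of a skew-symmetric matrix

    layer = nn.Linear(4, 4)
    register_parametrization(layer, "weight", Skew())
    assert torch.allclose(layer.weight, -layer.weight.T)
    with cached():
        w1 = layer.weight  # computed once and cached
        w2 = layer.weight  # served from the cache
        assert w1 is w2
    remove_parametrizations(layer, "weight", leave_parametrized=True)
    assert torch.allclose(layer.weight, -layer.weight.T)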
|
pytorch-master
|
torch/nn/utils/parametrize.py
|
import torch
def convert_conv2d_weight_memory_format(module, memory_format):
r"""Convert ``memory_format`` of ``nn.Conv2d.weight`` to ``memory_format``
The conversion recursively applies to nested ``nn.Module``, including ``module``.
Note that it only changes the memory_format, but not the semantics of each dimensions.
This function is used to facilitate the computation to adopt NHWC kernels, which
provides considerable speed up for fp16 data on CUDA devices with compute capability >= 7.0
.. note::
        Calling ``model.to(memory_format=torch.channels_last)`` is more aggressive
        than the utility function ``convert_conv2d_weight_memory_format``. Any
        layer with a 4d weight will be affected by ``model.to``, and such layers do
        not necessarily benefit from conversion to the specified ``memory_format``.
        One case we are confident about is the NHWC (channels_last) conversion for
        convolution in cuDNN, as it is beneficial to run convolution in NHWC even
        in cases where a permutation has to be applied to the input tensors.
        Hence our strategy here is to convert only the weight of convolution to
        channels_last. This ensures that:
        1. Fast convolution kernels will be used, the benefit of which could
        outweigh the overhead of permutation (if the input is not in the same format).
2. No unnecessary permutations are applied on layers that do not benefit
from memory_format conversion.
        The optimal case is that layers between convolution layers are channels-last
        compatible. The input tensor is permuted to channels last when it reaches
        the first convolution layer and stays in that memory format.
        Hence the following convolutions will not need to permute their input tensors.
        In the case where a channels-last-incompatible layer sits between convolution
        layers, we need to permute the input tensor back to contiguous format
        for that layer. The input tensor will go through the remaining layers in
        contiguous format and be permuted to channels last when it encounters
        another convolution layer. There's no point in propagating that
        permutation to an earlier layer, as most layers are quite agnostic to
        ``memory_format``.
        This claim might change when PyTorch supports fusion of permutations, as
        there might then be a better spot to fuse the permutation than
        immediately before a convolution.
Args:
module (nn.Module): ``nn.Conv2d`` & ``nn.ConvTranspose2d`` or container
``nn.Module``
        memory_format: user-specified ``memory_format``,
e.g. ``torch.channels_last`` or ``torch.contiguous_format``
Returns:
The original module with updated ``nn.Conv2d``
Example:
>>> # xdoctest: +REQUIRES(env:CUBLAS_WORKSPACE_CONFIG)
>>> input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float16, device="cuda")
>>> model = nn.Sequential(
>>> nn.Conv2d(8, 4, 3)).cuda().half()
>>> # This is identical to:
>>> # nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last)
>>> model = nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last)
>>> out = model(input)
"""
# TODO: expand this to `_ConvNd` when channels_last support is extended
# beyond only 4d tensors.
if isinstance(module, torch.nn.Conv2d) or isinstance(module, torch.nn.ConvTranspose2d):
weight_data = module.weight.detach().clone().contiguous(memory_format=memory_format)
module.weight.data = weight_data.resize_(weight_data.size(), memory_format=memory_format)
for child in module.children():
convert_conv2d_weight_memory_format(child, memory_format)
return module
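
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream file). The check runs
# on CPU and only verifies that the Conv2d weight ends up channels_last
# contiguous; the intended performance benefit applies to CUDA devices. The
# layer shape is an arbitrary assumption for this sketch only.
if __name__ == "__main__":
    model = torch.nn.Sequential(torch.nn.Conv2d(8, 4, 3))
    convert_conv2d_weight_memory_format(model, torch.channels_last)
    assert model[0].weight.is_contiguous(memory_format=torch.channels_last)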
|
pytorch-master
|
torch/nn/utils/memory_format.py
|
import functools
import torch
from torch.nn.utils.stateless import functional_call
from torch.nn.utils._expanded_weights.expanded_weights_impl import ExpandedWeight
from torch.utils._pytree import tree_flatten
# dependency on `functional_call` means that this can't be exposed in utils
# without creating circular dependency
def call_for_per_sample_grads(module, *, batch_size=None, loss_reduction="sum"):
r"""
call_for_per_sample_grads(module, batch_size=None, loss_reduction="sum")
``call_for_per_sample_grads`` returns a function that is invoked like the forward
function of ``module`` and will produce the same result. Then, when backward is invoked,
the parameters of ``module`` will have a ``grad_sample`` field populated with the per sample
gradients instead of the regular gradients
Args:
module: The ``nn.Module`` to get per sample gradients with respect to. All trainable
parameters will compute per sample gradients, located in a ``grad_sample``
field when ``backward`` is invoked
batch_size: The batch size of the input. If None is passed, all tensor arguments in args and kwargs must have
the same batch size, which is the size of the first dimension. Otherwise, it must be passed manually.
Default: None
loss_reduction: Indicates if the loss reduction (for aggregating the gradients) is a sum or a mean operation. If
"mean", per sample gradients will be scaled by the batch size to offset the crossbatch interaction from
running mean across a batch. Must be "mean" or "sum". Default: "sum"
Examples::
>>> model = nn.Linear(4, 3)
>>> batched_input = torch.randn(5, 4) # batch size of 5
>>> # xdoctest: +SKIP
>>> res = call_for_per_sample_grads(model)(batched_input).sum()
>>> res.backward()
>>> assert model.weight.shape == (3, 4)
>>> assert model.weight.grad_sample.shape == (5, 3, 4)
>>> assert model.weight.grad == None
>>> assert model.bias.shape == (3,)
>>> assert model.bias.grad_sample.shape == (5, 3)
>>> assert model.bias.grad == None
An example using "mean" loss reduction. The grad_sample fields will be scaled by batch_size from what they would be
if we ran the same code with loss_reduction="sum". This is because the mean at the end will scale all
    grad_outputs by 1 / batch_size from cross-batch interaction.
>>> model = nn.Linear(4, 3)
>>> batched_input = torch.randn(5, 4) # batch size of 5
        >>> res = call_for_per_sample_grads(model, batch_size=5, loss_reduction="mean")(batched_input).mean()
>>> res.backward()
Note::
Does not work with any `nn.RNN`, including `nn.GRU` or `nn.LSTM`. Please use custom
rewrites that wrap an `nn.Linear` module. See Opacus for an example
"""
def maybe_build_expanded_weight(og_tensor, batch_size):
if og_tensor.requires_grad:
return ExpandedWeight(og_tensor, batch_size, loss_reduction)
else:
return og_tensor
def compute_batch_size(*args, **kwargs):
args_and_kwargs = tree_flatten(args)[0] + tree_flatten(kwargs)[0]
batch_size = None
for arg in args_and_kwargs:
if not isinstance(arg, torch.Tensor):
continue
arg_batch_size = arg.shape[0] # we assume batch size is the first dim
if batch_size is not None and batch_size != arg_batch_size:
raise RuntimeError("When computing batch size, found at least one input with batch size "
f"{batch_size} and one with batch size {arg_batch_size}. Please specify it "
"explicitly using the batch size kwarg in call_for_per_sample_grads")
batch_size = arg_batch_size
if batch_size is None:
raise RuntimeError("Unable to find a tensor in the passed args and kwargs. They may not be pytree-able "
"and so ExpandedWeights cannot compute the batch size from the inputs. Please specify "
"it explicitly")
return batch_size
if loss_reduction not in ["sum", "mean"]:
raise RuntimeError(f"Expected loss_reduction argument to be sum or mean, got {loss_reduction}")
if not isinstance(module, torch.nn.Module):
raise RuntimeError(f"Module passed must be nn.Module, got {type(module).__name__}")
if not (batch_size is None or isinstance(batch_size, int)):
raise RuntimeError(f"Batch size passed must be None or an integer, got {type(batch_size).__name__}")
if batch_size is not None and batch_size < 1:
raise RuntimeError(f"Batch size must be positive, got {batch_size}")
for weight in module.parameters():
if hasattr(weight, "grad_sample") and weight.grad_sample is not None: # type: ignore[attr-defined]
raise RuntimeError("Current Expanded Weights accumulates the gradients, which will be incorrect for multiple "
f"calls without clearing gradients. Please clear out the grad_sample parameter of {weight} or "
"post an issue to pytorch/pytorch to prioritize correct behavior")
@functools.wraps(module.forward)
def wrapper(*args, **kwargs):
wrapper_batch_size = batch_size
if wrapper_batch_size is None:
wrapper_batch_size = compute_batch_size(*args, **kwargs)
params = {name: maybe_build_expanded_weight(value, wrapper_batch_size) for (name, value) in module.named_parameters()}
return functional_call(module, params, args, kwargs)
return wrapper
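
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream file), mirroring the
# docstring example above. The layer and batch sizes are arbitrary
# assumptions for this sketch only.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 3)
    batched_input = torch.randn(5, 4)  # batch size of 5
    loss = call_for_per_sample_grads(model)(batched_input).sum()
    loss.backward()
    # One gradient per sample is stored next to each trainable parameter.
    assert model.weight.grad_sample.shape == (5, 3, 4)
    assert model.bias.grad_sample.shape == (5, 3)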
|
pytorch-master
|
torch/nn/utils/_per_sample_grad.py
|
from . import rnn
from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
from .weight_norm import weight_norm, remove_weight_norm
from .convert_parameters import parameters_to_vector, vector_to_parameters
from .spectral_norm import spectral_norm, remove_spectral_norm
from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights
from .memory_format import convert_conv2d_weight_memory_format
from . import parametrizations
from .init import skip_init
from . import stateless
|
pytorch-master
|
torch/nn/utils/__init__.py
|
r"""
Weight Normalization from https://arxiv.org/abs/1602.07868
"""
from torch.nn.parameter import Parameter, UninitializedParameter
from torch import _weight_norm, norm_except_dim
from typing import Any, TypeVar
from ..modules import Module
__all__ = ['WeightNorm', 'weight_norm', 'remove_weight_norm']
class WeightNorm(object):
name: str
dim: int
def __init__(self, name: str, dim: int) -> None:
if dim is None:
dim = -1
self.name = name
self.dim = dim
# TODO Make return type more specific
def compute_weight(self, module: Module) -> Any:
g = getattr(module, self.name + '_g')
v = getattr(module, self.name + '_v')
return _weight_norm(v, g, self.dim)
@staticmethod
def apply(module, name: str, dim: int) -> 'WeightNorm':
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, WeightNorm) and hook.name == name:
raise RuntimeError("Cannot register two weight_norm hooks on "
"the same parameter {}".format(name))
if dim is None:
dim = -1
fn = WeightNorm(name, dim)
weight = getattr(module, name)
if isinstance(weight, UninitializedParameter):
raise ValueError(
'The module passed to `WeightNorm` can\'t have uninitialized parameters. '
'Make sure to run the dummy forward before applying weight normalization')
# remove w from parameter list
del module._parameters[name]
# add g and v as new parameters and express w as g/||v|| * v
module.register_parameter(name + '_g', Parameter(norm_except_dim(weight, 2, dim).data))
module.register_parameter(name + '_v', Parameter(weight.data))
setattr(module, name, fn.compute_weight(module))
# recompute weight before every forward()
module.register_forward_pre_hook(fn)
return fn
def remove(self, module: Module) -> None:
weight = self.compute_weight(module)
delattr(module, self.name)
del module._parameters[self.name + '_g']
del module._parameters[self.name + '_v']
setattr(module, self.name, Parameter(weight.data))
def __call__(self, module: Module, inputs: Any) -> None:
setattr(module, self.name, self.compute_weight(module))
T_module = TypeVar('T_module', bound=Module)
def weight_norm(module: T_module, name: str = 'weight', dim: int = 0) -> T_module:
r"""Applies weight normalization to a parameter in the given module.
.. math::
\mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|}
Weight normalization is a reparameterization that decouples the magnitude
of a weight tensor from its direction. This replaces the parameter specified
by :attr:`name` (e.g. ``'weight'``) with two parameters: one specifying the magnitude
(e.g. ``'weight_g'``) and one specifying the direction (e.g. ``'weight_v'``).
Weight normalization is implemented via a hook that recomputes the weight
tensor from the magnitude and direction before every :meth:`~Module.forward`
call.
By default, with ``dim=0``, the norm is computed independently per output
channel/plane. To compute a norm over the entire weight tensor, use
``dim=None``.
See https://arxiv.org/abs/1602.07868
Args:
module (Module): containing module
name (str, optional): name of weight parameter
dim (int, optional): dimension over which to compute the norm
Returns:
The original module with the weight norm hook
Example::
>>> m = weight_norm(nn.Linear(20, 40), name='weight')
>>> m
Linear(in_features=20, out_features=40, bias=True)
>>> m.weight_g.size()
torch.Size([40, 1])
>>> m.weight_v.size()
torch.Size([40, 20])
"""
WeightNorm.apply(module, name, dim)
return module
def remove_weight_norm(module: T_module, name: str = 'weight') -> T_module:
r"""Removes the weight normalization reparameterization from a module.
Args:
module (Module): containing module
name (str, optional): name of weight parameter
Example:
>>> m = weight_norm(nn.Linear(20, 40))
>>> remove_weight_norm(m)
"""
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, WeightNorm) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("weight_norm of '{}' not found in {}"
.format(name, module))
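
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream file). It checks that
# the recomputed weight equals g * v / ||v||, with the norm taken over every
# dimension except `dim`. The layer size and tolerance are arbitrary
# assumptions for this sketch only.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    m = weight_norm(nn.Linear(20, 40), name='weight', dim=0)
    expected = m.weight_g * m.weight_v / m.weight_v.norm(dim=1, keepdim=True)
    assert torch.allclose(m.weight, expected, atol=1e-6)
    remove_weight_norm(m)
    assert not hasattr(m, 'weight_g')  # g and v are folded back into `weight`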
|
pytorch-master
|
torch/nn/utils/weight_norm.py
|
# This file is never automatically imported within PyTorch so it is ok to
# always warn here
import warnings
warnings.warn("The `torch.nn.utils._stateless` code is deprecated now that "
"it is publicly available. Please use `torch.nn.utils.stateless "
"instead.", DeprecationWarning)
# Import * wouldn't work as most things are private and thus wouldn't be imported
# here.
from torch.nn.utils.stateless import functional_call # noqa: F401
from torch.nn.utils.stateless import _apply_func_submodules, _change_class # noqa: F401
# This one used to look public but should actually be private. This was fixed when making the module
# public and is kept here for BC
from torch.nn.utils.stateless import _reparametrize_module as reparametrize_module # noqa: F401
| pytorch-master | torch/nn/utils/_stateless.py |
import copy
import torch
def fuse_conv_bn_eval(conv, bn, transpose=False):
assert(not (conv.training or bn.training)), "Fusion only for eval!"
fused_conv = copy.deepcopy(conv)
fused_conv.weight, fused_conv.bias = \
fuse_conv_bn_weights(fused_conv.weight, fused_conv.bias,
bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias, transpose)
return fused_conv
def fuse_conv_bn_weights(conv_w, conv_b, bn_rm, bn_rv, bn_eps, bn_w, bn_b, transpose=False):
if conv_b is None:
conv_b = torch.zeros_like(bn_rm)
if bn_w is None:
bn_w = torch.ones_like(bn_rm)
if bn_b is None:
bn_b = torch.zeros_like(bn_rm)
bn_var_rsqrt = torch.rsqrt(bn_rv + bn_eps)
if transpose:
shape = [1, -1] + [1] * (len(conv_w.shape) - 2)
else:
shape = [-1, 1] + [1] * (len(conv_w.shape) - 2)
conv_w = conv_w * (bn_w * bn_var_rsqrt).reshape(shape)
conv_b = (conv_b - bn_rm) * bn_var_rsqrt * bn_w + bn_b
return torch.nn.Parameter(conv_w), torch.nn.Parameter(conv_b)
def fuse_linear_bn_eval(linear, bn):
assert(not (linear.training or bn.training)), "Fusion only for eval!"
fused_linear = copy.deepcopy(linear)
fused_linear.weight, fused_linear.bias = fuse_linear_bn_weights(
fused_linear.weight, fused_linear.bias,
bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias)
return fused_linear
def fuse_linear_bn_weights(linear_w, linear_b, bn_rm, bn_rv, bn_eps, bn_w, bn_b):
if linear_b is None:
linear_b = torch.zeros_like(bn_rm)
bn_scale = bn_w * torch.rsqrt(bn_rv + bn_eps)
fused_w = linear_w * bn_scale.unsqueeze(-1)
fused_b = (linear_b - bn_rm) * bn_scale + bn_b
return torch.nn.Parameter(fused_w), torch.nn.Parameter(fused_b)
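
# --- Editor's illustrative sketch (not part of the original file) ---
# Hedged numerical check of the fusion above: in eval mode, conv followed by
# batch norm should match the single fused conv up to floating-point error.
if __name__ == "__main__":
    torch.manual_seed(0)
    conv = torch.nn.Conv2d(3, 8, kernel_size=3).eval()
    bn = torch.nn.BatchNorm2d(8).eval()
    # Give the batch norm non-trivial running statistics so the check is meaningful.
    bn.running_mean.uniform_(-1.0, 1.0)
    bn.running_var.uniform_(0.5, 1.5)
    fused = fuse_conv_bn_eval(conv, bn)
    x = torch.randn(2, 3, 16, 16)
    assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)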
| pytorch-master | torch/nn/utils/fusion.py |
r"""
Pruning methods
"""
import numbers
from abc import ABC, abstractmethod
from collections.abc import Iterable
from typing import Tuple
import torch
class BasePruningMethod(ABC):
r"""Abstract base class for creation of new pruning techniques.
Provides a skeleton for customization requiring the overriding of methods
such as :meth:`compute_mask` and :meth:`apply`.
"""
_tensor_name: str
def __init__(self):
pass
def __call__(self, module, inputs):
r"""Multiplies the mask (stored in ``module[name + '_mask']``)
into the original tensor (stored in ``module[name + '_orig']``)
and stores the result into ``module[name]`` by using
:meth:`apply_mask`.
Args:
module (nn.Module): module containing the tensor to prune
inputs: not used.
"""
setattr(module, self._tensor_name, self.apply_mask(module))
@abstractmethod
def compute_mask(self, t, default_mask):
r"""Computes and returns a mask for the input tensor ``t``.
Starting from a base ``default_mask`` (which should be a mask of ones
if the tensor has not been pruned yet), generate a random mask to
apply on top of the ``default_mask`` according to the specific pruning
method recipe.
Args:
t (torch.Tensor): tensor representing the importance scores of the
parameter to prune.
default_mask (torch.Tensor): Base mask from previous pruning
                iterations, which needs to be respected after the new mask is
applied. Same dims as ``t``.
Returns:
mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``
"""
pass
def apply_mask(self, module):
r"""Simply handles the multiplication between the parameter being
pruned and the generated mask.
Fetches the mask and the original tensor from the module
and returns the pruned version of the tensor.
Args:
module (nn.Module): module containing the tensor to prune
Returns:
pruned_tensor (torch.Tensor): pruned version of the input tensor
"""
# to carry out the multiplication, the mask needs to have been computed,
# so the pruning method must know what tensor it's operating on
assert self._tensor_name is not None, "Module {} has to be pruned".format(
module
) # this gets set in apply()
mask = getattr(module, self._tensor_name + "_mask")
orig = getattr(module, self._tensor_name + "_orig")
pruned_tensor = mask.to(dtype=orig.dtype) * orig
return pruned_tensor
@classmethod
def apply(cls, module, name, *args, importance_scores=None, **kwargs):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
args: arguments passed on to a subclass of
:class:`BasePruningMethod`
importance_scores (torch.Tensor): tensor of importance scores (of
same shape as module parameter) used to compute mask for pruning.
The values in this tensor indicate the importance of the
corresponding elements in the parameter being pruned.
If unspecified or None, the parameter will be used in its place.
kwargs: keyword arguments passed on to a subclass of a
:class:`BasePruningMethod`
"""
def _get_composite_method(cls, module, name, *args, **kwargs):
# Check if a pruning method has already been applied to
# `module[name]`. If so, store that in `old_method`.
old_method = None
found = 0
# there should technically be only 1 hook with hook.name == name
# assert this using `found`
hooks_to_remove = []
for k, hook in module._forward_pre_hooks.items():
# if it exists, take existing thing, remove hook, then
# go through normal thing
if isinstance(hook, BasePruningMethod) and hook._tensor_name == name:
old_method = hook
hooks_to_remove.append(k)
found += 1
assert (
found <= 1
), "Avoid adding multiple pruning hooks to the\
same tensor {} of module {}. Use a PruningContainer.".format(
name, module
)
for k in hooks_to_remove:
del module._forward_pre_hooks[k]
# Apply the new pruning method, either from scratch or on top of
# the previous one.
method = cls(*args, **kwargs) # new pruning
# Have the pruning method remember what tensor it's been applied to
method._tensor_name = name
# combine `methods` with `old_method`, if `old_method` exists
if old_method is not None: # meaning that there was a hook
# if the hook is already a pruning container, just add the
# new pruning method to the container
if isinstance(old_method, PruningContainer):
old_method.add_pruning_method(method)
method = old_method # rename old_method --> method
# if the hook is simply a single pruning method, create a
# container, add the old pruning method and the new one
elif isinstance(old_method, BasePruningMethod):
container = PruningContainer(old_method)
# Have the pruning method remember the name of its tensor
# setattr(container, '_tensor_name', name)
container.add_pruning_method(method)
method = container # rename container --> method
return method
method = _get_composite_method(cls, module, name, *args, **kwargs)
# at this point we have no forward_pre_hooks but we could have an
# active reparametrization of the tensor if another pruning method
# had been applied (in which case `method` would be a PruningContainer
# and not a simple pruning method).
# Pruning is to be applied to the module's tensor named `name`,
# starting from the state it is found in prior to this iteration of
        # pruning. The pruning mask is calculated based on importance scores.
orig = getattr(module, name)
if importance_scores is not None:
assert (
importance_scores.shape == orig.shape
), "importance_scores should have the same shape as parameter \
{} of {}".format(
name, module
)
else:
importance_scores = orig
# If this is the first time pruning is applied, take care of moving
# the original tensor to a new parameter called name + '_orig' and
# and deleting the original parameter
if not isinstance(method, PruningContainer):
# copy `module[name]` to `module[name + '_orig']`
module.register_parameter(name + "_orig", orig)
# temporarily delete `module[name]`
del module._parameters[name]
default_mask = torch.ones_like(orig) # temp
# If this is not the first time pruning is applied, all of the above
# has been done before in a previous pruning iteration, so we're good
# to go
else:
default_mask = (
getattr(module, name + "_mask")
.detach()
.clone(memory_format=torch.contiguous_format)
)
# Use try/except because if anything goes wrong with the mask
# computation etc., you'd want to roll back.
try:
# get the final mask, computed according to the specific method
mask = method.compute_mask(importance_scores, default_mask=default_mask)
# reparametrize by saving mask to `module[name + '_mask']`...
module.register_buffer(name + "_mask", mask)
# ... and the new pruned tensor to `module[name]`
setattr(module, name, method.apply_mask(module))
# associate the pruning method to the module via a hook to
            # recompute the pruned tensor before every forward() call
module.register_forward_pre_hook(method)
except Exception as e:
if not isinstance(method, PruningContainer):
orig = getattr(module, name + "_orig")
module.register_parameter(name, orig)
del module._parameters[name + "_orig"]
raise e
return method
def prune(self, t, default_mask=None, importance_scores=None):
r"""Computes and returns a pruned version of input tensor ``t``
according to the pruning rule specified in :meth:`compute_mask`.
Args:
t (torch.Tensor): tensor to prune (of same dimensions as
``default_mask``).
importance_scores (torch.Tensor): tensor of importance scores (of
same shape as ``t``) used to compute mask for pruning ``t``.
The values in this tensor indicate the importance of the
corresponding elements in the ``t`` that is being pruned.
If unspecified or None, the tensor ``t`` will be used in its place.
default_mask (torch.Tensor, optional): mask from previous pruning
iteration, if any. To be considered when determining what
portion of the tensor that pruning should act on. If None,
default to a mask of ones.
Returns:
pruned version of tensor ``t``.
"""
if importance_scores is not None:
assert (
importance_scores.shape == t.shape
), "importance_scores should have the same shape as tensor t"
else:
importance_scores = t
default_mask = default_mask if default_mask is not None else torch.ones_like(t)
return t * self.compute_mask(importance_scores, default_mask=default_mask)
def remove(self, module):
r"""Removes the pruning reparameterization from a module. The pruned
parameter named ``name`` remains permanently pruned, and the parameter
named ``name+'_orig'`` is removed from the parameter list. Similarly,
the buffer named ``name+'_mask'`` is removed from the buffers.
Note:
Pruning itself is NOT undone or reversed!
"""
# before removing pruning from a tensor, it has to have been applied
assert (
self._tensor_name is not None
), "Module {} has to be pruned\
before pruning can be removed".format(
module
) # this gets set in apply()
# to update module[name] to latest trained weights
weight = self.apply_mask(module) # masked weights
# delete and reset
if hasattr(module, self._tensor_name):
delattr(module, self._tensor_name)
orig = module._parameters[self._tensor_name + "_orig"]
orig.data = weight.data
del module._parameters[self._tensor_name + "_orig"]
del module._buffers[self._tensor_name + "_mask"]
setattr(module, self._tensor_name, orig)
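
# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal, hypothetical custom method showing the contract described in
# ``compute_mask``/``apply`` above: subclass ``BasePruningMethod``, declare a
# ``PRUNING_TYPE`` and return a mask that respects the incoming
# ``default_mask``. The class name and threshold semantics are illustrative
# only and are not part of the public API.
class _ThresholdPruningExample(BasePruningMethod):
    PRUNING_TYPE = "unstructured"

    def __init__(self, threshold):
        self.threshold = threshold

    def compute_mask(self, t, default_mask):
        # Keep whatever was already pruned and additionally zero out
        # entries whose magnitude falls below the threshold.
        mask = default_mask.clone()
        mask[t.abs() < self.threshold] = 0
        return mask

    @classmethod
    def apply(cls, module, name, threshold):
        return super(_ThresholdPruningExample, cls).apply(
            module, name, threshold=threshold
        )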
class PruningContainer(BasePruningMethod):
"""Container holding a sequence of pruning methods for iterative pruning.
Keeps track of the order in which pruning methods are applied and handles
combining successive pruning calls.
Accepts as argument an instance of a BasePruningMethod or an iterable of
them.
"""
def __init__(self, *args):
self._pruning_methods: Tuple["BasePruningMethod", ...] = tuple()
if not isinstance(args, Iterable): # only 1 item
self._tensor_name = args._tensor_name
self.add_pruning_method(args)
elif len(args) == 1: # only 1 item in a tuple
self._tensor_name = args[0]._tensor_name
self.add_pruning_method(args[0])
else: # manual construction from list or other iterable (or no args)
for method in args:
self.add_pruning_method(method)
def add_pruning_method(self, method):
r"""Adds a child pruning ``method`` to the container.
Args:
method (subclass of BasePruningMethod): child pruning method
to be added to the container.
"""
# check that we're adding a pruning method to the container
if not isinstance(method, BasePruningMethod) and method is not None:
raise TypeError(
"{} is not a BasePruningMethod subclass".format(type(method))
)
elif method is not None and self._tensor_name != method._tensor_name:
raise ValueError(
"Can only add pruning methods acting on "
"the parameter named '{}' to PruningContainer {}.".format(
self._tensor_name, self
)
+ " Found '{}'".format(method._tensor_name)
)
# if all checks passed, add to _pruning_methods tuple
self._pruning_methods += (method,) # type: ignore[operator]
def __len__(self):
return len(self._pruning_methods)
def __iter__(self):
return iter(self._pruning_methods)
def __getitem__(self, idx):
return self._pruning_methods[idx]
def compute_mask(self, t, default_mask):
r"""Applies the latest ``method`` by computing the new partial masks
and returning its combination with the ``default_mask``.
The new partial mask should be computed on the entries or channels
that were not zeroed out by the ``default_mask``.
Which portions of the tensor ``t`` the new mask will be calculated from
depends on the ``PRUNING_TYPE`` (handled by the type handler):
* for 'unstructured', the mask will be computed from the raveled
list of nonmasked entries;
* for 'structured', the mask will be computed from the nonmasked
channels in the tensor;
* for 'global', the mask will be computed across all entries.
Args:
t (torch.Tensor): tensor representing the parameter to prune
(of same dimensions as ``default_mask``).
default_mask (torch.Tensor): mask from previous pruning iteration.
Returns:
mask (torch.Tensor): new mask that combines the effects
of the ``default_mask`` and the new mask from the current
pruning ``method`` (of same dimensions as ``default_mask`` and
``t``).
"""
def _combine_masks(method, t, mask):
r"""
Args:
method (a BasePruningMethod subclass): pruning method
currently being applied.
t (torch.Tensor): tensor representing the parameter to prune
(of same dimensions as mask).
mask (torch.Tensor): mask from previous pruning iteration
Returns:
new_mask (torch.Tensor): new mask that combines the effects
of the old mask and the new mask from the current
pruning method (of same dimensions as mask and t).
"""
new_mask = mask # start off from existing mask
new_mask = new_mask.to(dtype=t.dtype)
# compute a slice of t onto which the new pruning method will operate
if method.PRUNING_TYPE == "unstructured":
# prune entries of t where the mask is 1
slc = mask == 1
# for struct pruning, exclude channels that have already been
# entirely pruned
elif method.PRUNING_TYPE == "structured":
if not hasattr(method, "dim"):
raise AttributeError(
"Pruning methods of PRUNING_TYPE "
'"structured" need to have the attribute `dim` defined.'
)
# find the channels to keep by removing the ones that have been
# zeroed out already (i.e. where sum(entries) == 0)
n_dims = t.dim() # "is this a 2D tensor? 3D? ..."
dim = method.dim
# convert negative indexing
if dim < 0:
dim = n_dims + dim
                # if dim is still negative after adding n_dims, it was out of range
if dim < 0:
raise IndexError(
"Index is out of bounds for tensor with dimensions {}".format(
n_dims
)
)
                # find channels along dim = dim that aren't already entirely zeroed out
keep_channel = mask.sum(dim=[d for d in range(n_dims) if d != dim]) != 0
# create slice to identify what to prune
slc = [slice(None)] * n_dims
slc[dim] = keep_channel
elif method.PRUNING_TYPE == "global":
n_dims = len(t.shape) # "is this a 2D tensor? 3D? ..."
slc = [slice(None)] * n_dims
else:
raise ValueError(
"Unrecognized PRUNING_TYPE {}".format(method.PRUNING_TYPE)
)
# compute the new mask on the unpruned slice of the tensor t
partial_mask = method.compute_mask(t[slc], default_mask=mask[slc])
new_mask[slc] = partial_mask.to(dtype=new_mask.dtype)
return new_mask
method = self._pruning_methods[-1]
mask = _combine_masks(method, t, default_mask)
return mask
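
# --- Editor's illustrative sketch (not part of the original file) ---
# Hedged walkthrough of the iterative behaviour handled by ``PruningContainer``:
# pruning the same parameter twice stacks the methods in a container, and the
# surviving entries are the intersection of both masks. The helper below is
# only defined for illustration and is never called by this module.
def _pruning_container_stacking_example():
    lin = torch.nn.Linear(8, 4)
    random_unstructured(lin, "weight", amount=0.5)
    l1_unstructured(lin, "weight", amount=0.5)
    hook = next(iter(lin._forward_pre_hooks.values()))
    assert isinstance(hook, PruningContainer) and len(hook) == 2
    # The second method prunes half of the entries that survived the first one,
    # so only a quarter of the original entries remain non-zero.
    assert (lin.weight_mask != 0).float().mean() <= 0.5
    return lin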
class Identity(BasePruningMethod):
r"""Utility pruning method that does not prune any units but generates the
pruning parametrization with a mask of ones.
"""
PRUNING_TYPE = "unstructured"
def compute_mask(self, t, default_mask):
mask = default_mask
return mask
@classmethod
def apply(cls, module, name):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
"""
return super(Identity, cls).apply(module, name)
class RandomUnstructured(BasePruningMethod):
r"""Prune (currently unpruned) units in a tensor at random.
Args:
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
"""
PRUNING_TYPE = "unstructured"
def __init__(self, amount):
# Check range of validity of pruning amount
_validate_pruning_amount_init(amount)
self.amount = amount
def compute_mask(self, t, default_mask):
# Check that the amount of units to prune is not > than the number of
# parameters in t
tensor_size = t.nelement()
# Compute number of units to prune: amount if int,
# else amount * tensor_size
nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
# This should raise an error if the number of units to prune is larger
# than the number of units in the tensor
_validate_pruning_amount(nparams_toprune, tensor_size)
mask = default_mask.clone(memory_format=torch.contiguous_format)
if nparams_toprune != 0: # k=0 not supported by torch.kthvalue
prob = torch.rand_like(t)
topk = torch.topk(prob.view(-1), k=nparams_toprune)
mask.view(-1)[topk.indices] = 0
return mask
@classmethod
def apply(cls, module, name, amount):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
"""
return super(RandomUnstructured, cls).apply(module, name, amount=amount)
class L1Unstructured(BasePruningMethod):
r"""Prune (currently unpruned) units in a tensor by zeroing out the ones
with the lowest L1-norm.
Args:
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
"""
PRUNING_TYPE = "unstructured"
def __init__(self, amount):
# Check range of validity of pruning amount
_validate_pruning_amount_init(amount)
self.amount = amount
def compute_mask(self, t, default_mask):
# Check that the amount of units to prune is not > than the number of
# parameters in t
tensor_size = t.nelement()
# Compute number of units to prune: amount if int,
# else amount * tensor_size
nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
# This should raise an error if the number of units to prune is larger
# than the number of units in the tensor
_validate_pruning_amount(nparams_toprune, tensor_size)
mask = default_mask.clone(memory_format=torch.contiguous_format)
if nparams_toprune != 0: # k=0 not supported by torch.kthvalue
# largest=True --> top k; largest=False --> bottom k
# Prune the smallest k
topk = torch.topk(torch.abs(t).view(-1), k=nparams_toprune, largest=False)
# topk will have .indices and .values
mask.view(-1)[topk.indices] = 0
return mask
@classmethod
def apply(cls, module, name, amount, importance_scores=None):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
importance_scores (torch.Tensor): tensor of importance scores (of same
shape as module parameter) used to compute mask for pruning.
The values in this tensor indicate the importance of the corresponding
elements in the parameter being pruned.
If unspecified or None, the module parameter will be used in its place.
"""
return super(L1Unstructured, cls).apply(
module, name, amount=amount, importance_scores=importance_scores
)
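
# --- Editor's illustrative sketch (not part of the original file) ---
# Hedged, self-contained example of ``L1Unstructured.compute_mask``: with a
# fresh mask of ones and ``amount=2``, the two smallest-magnitude entries are
# the ones that get zeroed out. Defined for illustration only; never called.
def _l1_unstructured_mask_example():
    t = torch.tensor([0.1, -3.0, 0.2, 5.0])
    method = L1Unstructured(amount=2)
    mask = method.compute_mask(t, default_mask=torch.ones_like(t))
    assert torch.equal(mask, torch.tensor([0.0, 1.0, 0.0, 1.0]))
    return mask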
class RandomStructured(BasePruningMethod):
r"""Prune entire (currently unpruned) channels in a tensor at random.
Args:
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
dim (int, optional): index of the dim along which we define
channels to prune. Default: -1.
"""
PRUNING_TYPE = "structured"
def __init__(self, amount, dim=-1):
# Check range of validity of amount
_validate_pruning_amount_init(amount)
self.amount = amount
self.dim = dim
def compute_mask(self, t, default_mask):
r"""Computes and returns a mask for the input tensor ``t``.
Starting from a base ``default_mask`` (which should be a mask of ones
if the tensor has not been pruned yet), generate a random mask to
apply on top of the ``default_mask`` by randomly zeroing out channels
along the specified dim of the tensor.
Args:
t (torch.Tensor): tensor representing the parameter to prune
default_mask (torch.Tensor): Base mask from previous pruning
                iterations, which needs to be respected after the new mask is
applied. Same dims as ``t``.
Returns:
mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``
Raises:
IndexError: if ``self.dim >= len(t.shape)``
"""
# Check that tensor has structure (i.e. more than 1 dimension) such
# that the concept of "channels" makes sense
_validate_structured_pruning(t)
# Check that self.dim is a valid dim to index t, else raise IndexError
_validate_pruning_dim(t, self.dim)
# Check that the amount of channels to prune is not > than the number of
# channels in t along the dim to prune
tensor_size = t.shape[self.dim]
# Compute number of units to prune: amount if int,
# else amount * tensor_size
nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
# This should raise an error if the number of units to prune is larger
# than the number of units in the tensor
_validate_pruning_amount(nparams_toprune, tensor_size)
        # Compute binary mask by initializing it to all 0s and then filling in
        # 1s for the channels that are randomly selected to be kept, along
        # self.dim. mask has the same shape as tensor t
def make_mask(t, dim, nchannels, nchannels_toprune):
# generate a random number in [0, 1] to associate to each channel
prob = torch.rand(nchannels)
# generate mask for each channel by 0ing out the channels that
# got assigned the k = nchannels_toprune lowest values in prob
threshold = torch.kthvalue(prob, k=nchannels_toprune).values
channel_mask = prob > threshold
mask = torch.zeros_like(t)
slc = [slice(None)] * len(t.shape)
slc[dim] = channel_mask
mask[slc] = 1
return mask
if nparams_toprune == 0: # k=0 not supported by torch.kthvalue
mask = default_mask
else:
# apply the new structured mask on top of prior (potentially
# unstructured) mask
mask = make_mask(t, self.dim, tensor_size, nparams_toprune)
mask *= default_mask.to(dtype=mask.dtype)
return mask
@classmethod
def apply(cls, module, name, amount, dim=-1):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
dim (int, optional): index of the dim along which we define
channels to prune. Default: -1.
"""
return super(RandomStructured, cls).apply(module, name, amount=amount, dim=dim)
class LnStructured(BasePruningMethod):
r"""Prune entire (currently unpruned) channels in a tensor based on their
L\ ``n``-norm.
Args:
amount (int or float): quantity of channels to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
entries for argument ``p`` in :func:`torch.norm`.
dim (int, optional): index of the dim along which we define
channels to prune. Default: -1.
"""
PRUNING_TYPE = "structured"
def __init__(self, amount, n, dim=-1):
# Check range of validity of amount
_validate_pruning_amount_init(amount)
self.amount = amount
self.n = n
self.dim = dim
def compute_mask(self, t, default_mask):
r"""Computes and returns a mask for the input tensor ``t``.
Starting from a base ``default_mask`` (which should be a mask of ones
if the tensor has not been pruned yet), generate a mask to apply on
top of the ``default_mask`` by zeroing out the channels along the
specified dim with the lowest L\ ``n``-norm.
Args:
t (torch.Tensor): tensor representing the parameter to prune
default_mask (torch.Tensor): Base mask from previous pruning
                iterations, which needs to be respected after the new mask is
applied. Same dims as ``t``.
Returns:
mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``
Raises:
IndexError: if ``self.dim >= len(t.shape)``
"""
# Check that tensor has structure (i.e. more than 1 dimension) such
# that the concept of "channels" makes sense
_validate_structured_pruning(t)
# Check that self.dim is a valid dim to index t, else raise IndexError
_validate_pruning_dim(t, self.dim)
# Check that the amount of channels to prune is not > than the number of
# channels in t along the dim to prune
tensor_size = t.shape[self.dim]
# Compute number of units to prune: amount if int,
# else amount * tensor_size
nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
nparams_tokeep = tensor_size - nparams_toprune
# This should raise an error if the number of units to prune is larger
# than the number of units in the tensor
_validate_pruning_amount(nparams_toprune, tensor_size)
# Structured pruning prunes entire channels so we need to know the
# L_n norm along each channel to then find the topk based on this
# metric
norm = _compute_norm(t, self.n, self.dim)
# largest=True --> top k; largest=False --> bottom k
# Keep the largest k channels along dim=self.dim
topk = torch.topk(norm, k=nparams_tokeep, largest=True)
# topk will have .indices and .values
# Compute binary mask by initializing it to all 0s and then filling in
# 1s wherever topk.indices indicates, along self.dim.
# mask has the same shape as tensor t
def make_mask(t, dim, indices):
# init mask to 0
mask = torch.zeros_like(t)
            # e.g.: slc = [slice(None), slice(None), slice(None)], if len(t.shape) = 3
            slc = [slice(None)] * len(t.shape)
            # replace the full slice at position=dim with the channel indices
            # e.g.: slc = [:, :, [0, 2, 3]] if dim=2 & indices=[0,2,3]
slc[dim] = indices
# use slc to slice mask and replace all its entries with 1s
# e.g.: mask[:, :, [0, 2, 3]] = 1
mask[slc] = 1
return mask
if nparams_toprune == 0: # k=0 not supported by torch.kthvalue
mask = default_mask
else:
mask = make_mask(t, self.dim, topk.indices)
mask *= default_mask.to(dtype=mask.dtype)
return mask
@classmethod
def apply(cls, module, name, amount, n, dim, importance_scores=None):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
entries for argument ``p`` in :func:`torch.norm`.
dim (int): index of the dim along which we define channels to
prune.
importance_scores (torch.Tensor): tensor of importance scores (of same
shape as module parameter) used to compute mask for pruning.
The values in this tensor indicate the importance of the corresponding
elements in the parameter being pruned.
If unspecified or None, the module parameter will be used in its place.
"""
return super(LnStructured, cls).apply(
module,
name,
amount=amount,
n=n,
dim=dim,
importance_scores=importance_scores,
)
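
# --- Editor's illustrative sketch (not part of the original file) ---
# Hedged example of the structured masks built above: ``LnStructured`` zeroes
# entire channels along ``dim``, here the single row of a 3x4 tensor with the
# smallest L1 norm. Defined for illustration only; never called.
def _ln_structured_mask_example():
    t = torch.tensor([[1.0, 1.0, 1.0, 1.0],
                      [0.1, 0.1, 0.1, 0.1],
                      [2.0, 2.0, 2.0, 2.0]])
    method = LnStructured(amount=1, n=1, dim=0)
    mask = method.compute_mask(t, default_mask=torch.ones_like(t))
    # Only the middle row (lowest L1 norm along dim=0) is fully pruned.
    assert torch.equal(mask, torch.tensor([[1.0, 1.0, 1.0, 1.0],
                                           [0.0, 0.0, 0.0, 0.0],
                                           [1.0, 1.0, 1.0, 1.0]]))
    return mask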
class CustomFromMask(BasePruningMethod):
PRUNING_TYPE = "global"
def __init__(self, mask):
self.mask = mask
def compute_mask(self, t, default_mask):
assert default_mask.shape == self.mask.shape
mask = default_mask * self.mask.to(dtype=default_mask.dtype)
return mask
@classmethod
def apply(cls, module, name, mask):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
                will act.
            mask (Tensor): binary mask to be applied to the parameter.
        """
return super(CustomFromMask, cls).apply(module, name, mask=mask)
def identity(module, name):
r"""Applies pruning reparametrization to the tensor corresponding to the
parameter called ``name`` in ``module`` without actually pruning any
    units. Modifies module in place (and also returns the modified module)
by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Note:
The mask is a tensor of ones.
Args:
module (nn.Module): module containing the tensor to prune.
name (str): parameter name within ``module`` on which pruning
will act.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> # xdoctest: +SKIP
>>> m = prune.identity(nn.Linear(2, 3), 'bias')
>>> print(m.bias_mask)
tensor([1., 1., 1.])
"""
Identity.apply(module, name)
return module
def random_unstructured(module, name, amount):
r"""Prunes tensor corresponding to parameter called ``name`` in ``module``
by removing the specified ``amount`` of (currently unpruned) units
selected at random.
    Modifies module in place (and also returns the modified module) by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> # xdoctest: +SKIP
>>> m = prune.random_unstructured(nn.Linear(2, 3), 'weight', amount=1)
>>> torch.sum(m.weight_mask == 0)
tensor(1)
"""
RandomUnstructured.apply(module, name, amount)
return module
def l1_unstructured(module, name, amount, importance_scores=None):
r"""Prunes tensor corresponding to parameter called ``name`` in ``module``
by removing the specified `amount` of (currently unpruned) units with the
lowest L1-norm.
    Modifies module in place (and also returns the modified module)
by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
importance_scores (torch.Tensor): tensor of importance scores (of same
shape as module parameter) used to compute mask for pruning.
The values in this tensor indicate the importance of the corresponding
elements in the parameter being pruned.
If unspecified or None, the module parameter will be used in its place.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> # xdoctest: +SKIP
>>> m = prune.l1_unstructured(nn.Linear(2, 3), 'weight', amount=0.2)
>>> m.state_dict().keys()
odict_keys(['bias', 'weight_orig', 'weight_mask'])
"""
L1Unstructured.apply(
module, name, amount=amount, importance_scores=importance_scores
)
return module
def random_structured(module, name, amount, dim):
r"""Prunes tensor corresponding to parameter called ``name`` in ``module``
by removing the specified ``amount`` of (currently unpruned) channels
along the specified ``dim`` selected at random.
    Modifies module in place (and also returns the modified module)
by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
dim (int): index of the dim along which we define channels to prune.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> # xdoctest: +SKIP
>>> m = prune.random_structured(
... nn.Linear(5, 3), 'weight', amount=3, dim=1
... )
>>> columns_pruned = int(sum(torch.sum(m.weight, dim=0) == 0))
>>> print(columns_pruned)
3
"""
RandomStructured.apply(module, name, amount, dim)
return module
def ln_structured(module, name, amount, n, dim, importance_scores=None):
r"""Prunes tensor corresponding to parameter called ``name`` in ``module``
by removing the specified ``amount`` of (currently unpruned) channels
along the specified ``dim`` with the lowest L\ ``n``-norm.
    Modifies module in place (and also returns the modified module)
by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
entries for argument ``p`` in :func:`torch.norm`.
dim (int): index of the dim along which we define channels to prune.
importance_scores (torch.Tensor): tensor of importance scores (of same
shape as module parameter) used to compute mask for pruning.
The values in this tensor indicate the importance of the corresponding
elements in the parameter being pruned.
If unspecified or None, the module parameter will be used in its place.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> # xdoctest: +SKIP
>>> m = prune.ln_structured(
... nn.Conv2d(5, 3, 2), 'weight', amount=0.3, dim=1, n=float('-inf')
... )
"""
LnStructured.apply(
module, name, amount, n, dim, importance_scores=importance_scores
)
return module
def global_unstructured(parameters, pruning_method, importance_scores=None, **kwargs):
r"""
Globally prunes tensors corresponding to all parameters in ``parameters``
by applying the specified ``pruning_method``.
Modifies modules in place by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
parameters (Iterable of (module, name) tuples): parameters of
the model to prune in a global fashion, i.e. by aggregating all
weights prior to deciding which ones to prune. module must be of
type :class:`nn.Module`, and name must be a string.
pruning_method (function): a valid pruning function from this module,
or a custom one implemented by the user that satisfies the
implementation guidelines and has ``PRUNING_TYPE='unstructured'``.
importance_scores (dict): a dictionary mapping (module, name) tuples to
the corresponding parameter's importance scores tensor. The tensor
should be the same shape as the parameter, and is used for computing
mask for pruning.
If unspecified or None, the parameter will be used in place of its
importance scores.
kwargs: other keyword arguments such as:
amount (int or float): quantity of parameters to prune across the
specified parameters.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
Raises:
TypeError: if ``PRUNING_TYPE != 'unstructured'``
Note:
Since global structured pruning doesn't make much sense unless the
norm is normalized by the size of the parameter, we now limit the
scope of global pruning to unstructured methods.
Examples:
>>> # xdoctest: +SKIP
>>> net = nn.Sequential(OrderedDict([
... ('first', nn.Linear(10, 4)),
... ('second', nn.Linear(4, 1)),
... ]))
>>> parameters_to_prune = (
... (net.first, 'weight'),
... (net.second, 'weight'),
... )
>>> prune.global_unstructured(
... parameters_to_prune,
... pruning_method=prune.L1Unstructured,
... amount=10,
... )
>>> print(sum(torch.nn.utils.parameters_to_vector(net.buffers()) == 0))
tensor(10, dtype=torch.uint8)
"""
# ensure parameters is a list or generator of tuples
if not isinstance(parameters, Iterable):
raise TypeError("global_unstructured(): parameters is not an Iterable")
importance_scores = importance_scores if importance_scores is not None else {}
if not isinstance(importance_scores, dict):
raise TypeError("global_unstructured(): importance_scores must be of type dict")
# flatten importance scores to consider them all at once in global pruning
relevant_importance_scores = torch.nn.utils.parameters_to_vector(
[
importance_scores.get((module, name), getattr(module, name))
for (module, name) in parameters
]
)
# similarly, flatten the masks (if they exist), or use a flattened vector
# of 1s of the same dimensions as t
default_mask = torch.nn.utils.parameters_to_vector(
[
getattr(module, name + "_mask", torch.ones_like(getattr(module, name)))
for (module, name) in parameters
]
)
# use the canonical pruning methods to compute the new mask, even if the
# parameter is now a flattened out version of `parameters`
container = PruningContainer()
container._tensor_name = "temp" # to make it match that of `method`
method = pruning_method(**kwargs)
method._tensor_name = "temp" # to make it match that of `container`
if method.PRUNING_TYPE != "unstructured":
raise TypeError(
'Only "unstructured" PRUNING_TYPE supported for '
"the `pruning_method`. Found method {} of type {}".format(
pruning_method, method.PRUNING_TYPE
)
)
container.add_pruning_method(method)
# use the `compute_mask` method from `PruningContainer` to combine the
# mask computed by the new method with the pre-existing mask
final_mask = container.compute_mask(relevant_importance_scores, default_mask)
# Pointer for slicing the mask to match the shape of each parameter
pointer = 0
for module, name in parameters:
param = getattr(module, name)
# The length of the parameter
num_param = param.numel()
# Slice the mask, reshape it
param_mask = final_mask[pointer : pointer + num_param].view_as(param)
# Assign the correct pre-computed mask to each parameter and add it
# to the forward_pre_hooks like any other pruning method
custom_from_mask(module, name, mask=param_mask)
# Increment the pointer to continue slicing the final_mask
pointer += num_param
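
# --- Editor's illustrative sketch (not part of the original file) ---
# Hedged example of the global behaviour documented above: ``amount`` is a
# budget over *all* listed parameters, so individual layers may end up with
# very different sparsity levels while the total number of pruned entries is
# exact. Defined for illustration only; never called.
def _global_unstructured_example():
    first = torch.nn.Linear(4, 4)   # 16 weight entries
    second = torch.nn.Linear(4, 2)  # 8 weight entries
    global_unstructured(
        [(first, "weight"), (second, "weight")],
        pruning_method=L1Unstructured,
        amount=6,
    )
    total_pruned = int((first.weight_mask == 0).sum() + (second.weight_mask == 0).sum())
    # Exactly six entries are pruned in total, but not necessarily three per layer.
    assert total_pruned == 6
    return first, second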
def custom_from_mask(module, name, mask):
r"""Prunes tensor corresponding to parameter called ``name`` in ``module``
by applying the pre-computed mask in ``mask``.
    Modifies module in place (and also returns the modified module)
by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
mask (Tensor): binary mask to be applied to the parameter.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> # xdoctest: +SKIP
>>> m = prune.custom_from_mask(
... nn.Linear(5, 3), name='bias', mask=torch.tensor([0, 1, 0])
... )
>>> print(m.bias_mask)
tensor([0., 1., 0.])
"""
CustomFromMask.apply(module, name, mask)
return module
def remove(module, name):
r"""Removes the pruning reparameterization from a module and the
pruning method from the forward hook. The pruned
parameter named ``name`` remains permanently pruned, and the parameter
named ``name+'_orig'`` is removed from the parameter list. Similarly,
the buffer named ``name+'_mask'`` is removed from the buffers.
Note:
Pruning itself is NOT undone or reversed!
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
Examples:
>>> m = random_unstructured(nn.Linear(5, 7), name='weight', amount=0.2)
>>> m = remove(m, name='weight')
"""
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, BasePruningMethod) and hook._tensor_name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError(
"Parameter '{}' of module {} has to be pruned "
"before pruning can be removed".format(name, module)
)
def is_pruned(module):
r"""Check whether ``module`` is pruned by looking for
``forward_pre_hooks`` in its modules that inherit from the
:class:`BasePruningMethod`.
Args:
module (nn.Module): object that is either pruned or unpruned
Returns:
binary answer to whether ``module`` is pruned.
Examples:
>>> m = nn.Linear(5, 7)
>>> # xdoctest: +SKIP
>>> print(prune.is_pruned(m))
False
>>> prune.random_unstructured(m, name='weight', amount=0.2)
>>> print(prune.is_pruned(m))
True
"""
for _, submodule in module.named_modules():
for _, hook in submodule._forward_pre_hooks.items():
if isinstance(hook, BasePruningMethod):
return True
return False
def _validate_pruning_amount_init(amount):
r"""Validation helper to check the range of amount at init.
Args:
amount (int or float): quantity of parameters to prune.
If float, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If int, it represents the
absolute number of parameters to prune.
Raises:
ValueError: if amount is a float not in [0, 1], or if it's a negative
integer.
TypeError: if amount is neither a float nor an integer.
Note:
This does not take into account the number of parameters in the
tensor to be pruned, which is known only at prune.
"""
if not isinstance(amount, numbers.Real):
raise TypeError(
"Invalid type for amount: {}. Must be int or float." "".format(amount)
)
if (isinstance(amount, numbers.Integral) and amount < 0) or (
not isinstance(amount, numbers.Integral) # so it's a float
and (float(amount) > 1.0 or float(amount) < 0.0)
):
raise ValueError(
"amount={} should either be a float in the "
"range [0, 1] or a non-negative integer"
"".format(amount)
)
def _validate_pruning_amount(amount, tensor_size):
r"""Validation helper to check that the amount of parameters to prune
    is meaningful with respect to the size of the data (`tensor_size`).
Args:
amount (int or float): quantity of parameters to prune.
If float, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If int, it represents the
absolute number of parameters to prune.
tensor_size (int): absolute number of parameters in the tensor
to prune.
"""
# TODO: consider removing this check and allowing users to specify
# a number of units to prune that is greater than the number of units
# left to prune. In this case, the tensor will just be fully pruned.
if isinstance(amount, numbers.Integral) and amount > tensor_size:
raise ValueError(
"amount={} should be smaller than the number of "
"parameters to prune={}".format(amount, tensor_size)
)
def _validate_structured_pruning(t):
r"""Validation helper to check that the tensor to be pruned is multi-
dimensional, such that the concept of "channels" is well-defined.
Args:
t (torch.Tensor): tensor representing the parameter to prune
Raises:
ValueError: if the tensor `t` is not at least 2D.
"""
shape = t.shape
if len(shape) <= 1:
raise ValueError(
"Structured pruning can only be applied to "
"multidimensional tensors. Found tensor of shape "
"{} with {} dims".format(shape, len(shape))
)
def _compute_nparams_toprune(amount, tensor_size):
r"""Since amount can be expressed either in absolute value or as a
percentage of the number of units/channels in a tensor, this utility
function converts the percentage to absolute value to standardize
the handling of pruning.
Args:
amount (int or float): quantity of parameters to prune.
If float, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If int, it represents the
absolute number of parameters to prune.
tensor_size (int): absolute number of parameters in the tensor
to prune.
Returns:
int: the number of units to prune in the tensor
"""
# incorrect type already checked in _validate_pruning_amount_init
if isinstance(amount, numbers.Integral):
return amount
else:
return round(amount * tensor_size)
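
# --- Editor's illustrative note (not part of the original file) ---
# e.g. _compute_nparams_toprune(0.25, 100) == 25 (fraction rounded to a count)
# and  _compute_nparams_toprune(3, 100) == 3 (absolute count passed through).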
def _validate_pruning_dim(t, dim):
r"""
Args:
t (torch.Tensor): tensor representing the parameter to prune
dim (int): index of the dim along which we define channels to prune
"""
if dim >= t.dim():
raise IndexError("Invalid index {} for tensor of size {}".format(dim, t.shape))
def _compute_norm(t, n, dim):
r"""Compute the L_n-norm across all entries in tensor `t` along all dimension
except for the one identified by dim.
Example: if `t` is of shape, say, 3x2x4 and dim=2 (the last dim),
then norm will have Size [4], and each entry will represent the
`L_n`-norm computed using the 3x2=6 entries for each of the 4 channels.
Args:
t (torch.Tensor): tensor representing the parameter to prune
n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
entries for argument p in torch.norm
dim (int): dim identifying the channels to prune
Returns:
norm (torch.Tensor): L_n norm computed across all dimensions except
            for `dim`. By construction, `norm` is 1D with `norm.shape[0] = t.shape[dim]`.
"""
# dims = all axes, except for the one identified by `dim`
dims = list(range(t.dim()))
# convert negative indexing
if dim < 0:
dim = dims[dim]
dims.remove(dim)
norm = torch.norm(t, p=n, dim=dims)
return norm
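
# --- Editor's illustrative sketch (not part of the original file) ---
# Hedged check of ``_compute_norm``: for a 3x2x4 tensor and ``dim=2`` the norm
# is taken over the 3x2 entries of each of the 4 channels, so the result has
# shape ``(4,)``. Defined for illustration only; never called.
def _compute_norm_shape_example():
    t = torch.arange(24.0).reshape(3, 2, 4)
    norm = _compute_norm(t, n=2, dim=2)
    assert norm.shape == (t.shape[2],)
    return norm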
| pytorch-master | torch/nn/utils/prune.py |
from enum import Enum, auto
import torch
from torch import Tensor
from ..utils import parametrize
from ..modules import Module
from .. import functional as F
from typing import Optional
__all__ = ['orthogonal', 'spectral_norm']
def _is_orthogonal(Q, eps=None):
n, k = Q.size(-2), Q.size(-1)
Id = torch.eye(k, dtype=Q.dtype, device=Q.device)
    if eps is None:
        # A reasonable default eps, but not too large
        eps = 10. * n * torch.finfo(Q.dtype).eps
return torch.allclose(Q.mH @ Q, Id, atol=eps)
def _make_orthogonal(A):
""" Assume that A is a tall matrix.
Compute the Q factor s.t. A = QR (A may be complex) and diag(R) is real and non-negative
"""
X, tau = torch.geqrf(A)
Q = torch.linalg.householder_product(X, tau)
# The diagonal of X is the diagonal of R (which is always real) so we normalise by its signs
Q *= X.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
return Q
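
# --- Editor's illustrative sketch (not part of the original file) ---
# Hedged check of the helpers above, assuming a LAPACK-enabled build:
# ``_make_orthogonal`` turns a random tall matrix into one with orthonormal
# columns, which ``_is_orthogonal`` recognises. Defined for illustration only;
# never called.
def _orthogonal_helpers_example():
    A = torch.randn(5, 3, dtype=torch.float64)
    Q = _make_orthogonal(A)
    assert _is_orthogonal(Q)
    assert torch.allclose(Q.mH @ Q, torch.eye(3, dtype=torch.float64), atol=1e-10)
    return Q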
class _OrthMaps(Enum):
matrix_exp = auto()
cayley = auto()
householder = auto()
class _Orthogonal(Module):
base: Tensor
def __init__(self,
weight,
orthogonal_map: _OrthMaps,
*,
use_trivialization=True) -> None:
super().__init__()
# Note [Householder complex]
# For complex tensors, it is not possible to compute the tensor `tau` necessary for
# linalg.householder_product from the reflectors.
# To see this, note that the reflectors have a shape like:
# 0 0 0
# * 0 0
# * * 0
# which, for complex matrices, give n(n-1) (real) parameters. Now, you need n^2 parameters
# to parametrize the unitary matrices. Saving tau on its own does not work either, because
# not every combination of `(A, tau)` gives a unitary matrix, meaning that if we optimise
# them as independent tensors we would not maintain the constraint
# An equivalent reasoning holds for rectangular matrices
if weight.is_complex() and orthogonal_map == _OrthMaps.householder:
raise ValueError("The householder parametrization does not support complex tensors.")
self.shape = weight.shape
self.orthogonal_map = orthogonal_map
if use_trivialization:
self.register_buffer("base", None)
def forward(self, X: torch.Tensor) -> torch.Tensor:
n, k = X.size(-2), X.size(-1)
transposed = n < k
if transposed:
X = X.mT
n, k = k, n
# Here n > k and X is a tall matrix
if self.orthogonal_map == _OrthMaps.matrix_exp or self.orthogonal_map == _OrthMaps.cayley:
# We just need n x k - k(k-1)/2 parameters
X = X.tril()
if n != k:
# Embed into a square matrix
X = torch.cat([X, X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1)
A = X - X.mH
# A is skew-symmetric (or skew-hermitian)
if self.orthogonal_map == _OrthMaps.matrix_exp:
Q = torch.matrix_exp(A)
elif self.orthogonal_map == _OrthMaps.cayley:
# Computes the Cayley retraction (I+A/2)(I-A/2)^{-1}
Id = torch.eye(n, dtype=A.dtype, device=A.device)
Q = torch.linalg.solve(torch.add(Id, A, alpha=-0.5), torch.add(Id, A, alpha=0.5))
# Q is now orthogonal (or unitary) of size (..., n, n)
if n != k:
Q = Q[..., :k]
# Q is now the size of the X (albeit perhaps transposed)
else:
# X is real here, as we do not support householder with complex numbers
A = X.tril(diagonal=-1)
tau = 2. / (1. + (A * A).sum(dim=-2))
Q = torch.linalg.householder_product(A, tau)
# The diagonal of X is 1's and -1's
# We do not want to differentiate through this or update the diagonal of X hence the casting
Q = Q * X.diagonal(dim1=-2, dim2=-1).int().unsqueeze(-2)
if hasattr(self, "base"):
Q = self.base @ Q
if transposed:
Q = Q.mT
return Q
@torch.autograd.no_grad()
def right_inverse(self, Q: torch.Tensor) -> torch.Tensor:
if Q.shape != self.shape:
raise ValueError(f"Expected a matrix or batch of matrices of shape {self.shape}. "
f"Got a tensor of shape {Q.shape}.")
Q_init = Q
n, k = Q.size(-2), Q.size(-1)
transpose = n < k
if transpose:
Q = Q.mT
n, k = k, n
        # We make sure to always copy Q in every path
if not hasattr(self, "base"):
# Note [right_inverse expm cayley]
# If we do not have use_trivialization=True, we just implement the inverse of the forward
# map for the Householder. To see why, think that for the Cayley map,
# we would need to find the matrix X \in R^{n x k} such that:
# Y = torch.cat([X.tril(), X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1)
# A = Y - Y.mH
# cayley(A)[:, :k]
# gives the original tensor. It is not clear how to do this.
# Perhaps via some algebraic manipulation involving the QR like that of
# Corollary 2.2 in Edelman, Arias and Smith?
if self.orthogonal_map == _OrthMaps.cayley or self.orthogonal_map == _OrthMaps.matrix_exp:
raise NotImplementedError("It is not possible to assign to the matrix exponential "
"or the Cayley parametrizations when use_trivialization=False.")
# If parametrization == _OrthMaps.householder, make Q orthogonal via the QR decomposition.
# Here Q is always real because we do not support householder and complex matrices.
# See note [Householder complex]
A, tau = torch.geqrf(Q)
# We want to have a decomposition X = QR with diag(R) > 0, as otherwise we could
# decompose an orthogonal matrix Q as Q = (-Q)@(-Id), which is a valid QR decomposition
            # The diagonal of A contains the diagonal of R from the QR decomposition
A.diagonal(dim1=-2, dim2=-1).sign_()
# Equality with zero is ok because LAPACK returns exactly zero when it does not want
# to use a particular reflection
A.diagonal(dim1=-2, dim2=-1)[tau == 0.] *= -1
return A.mT if transpose else A
else:
if n == k:
# We check whether Q is orthogonal
if not _is_orthogonal(Q):
Q = _make_orthogonal(Q)
else: # Is orthogonal
Q = Q.clone()
else:
# Complete Q into a full n x n orthogonal matrix
N = torch.randn(*(Q.size()[:-2] + (n, n - k)), dtype=Q.dtype, device=Q.device)
Q = torch.cat([Q, N], dim=-1)
Q = _make_orthogonal(Q)
self.base = Q
# It is necessary to return the -Id, as we use the diagonal for the
# Householder parametrization. Using -Id makes:
# householder(torch.zeros(m,n)) == torch.eye(m,n)
# Poor man's version of eye_like
neg_Id = torch.zeros_like(Q_init)
neg_Id.diagonal(dim1=-2, dim2=-1).fill_(-1.)
return neg_Id
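
# --- Editor's illustrative sketch (not part of the original file) ---
# Hedged check of the Cayley retraction used in ``_Orthogonal.forward``: for a
# skew-symmetric A, Q = (I + A/2)(I - A/2)^{-1} is orthogonal. The code below
# mirrors the solve-based form used in ``forward`` (the two factors commute).
# Defined for illustration only; never called.
def _cayley_map_example():
    X = torch.randn(4, 4, dtype=torch.float64)
    A = X - X.mT  # skew-symmetric: A.T == -A
    Id = torch.eye(4, dtype=torch.float64)
    Q = torch.linalg.solve(torch.add(Id, A, alpha=-0.5), torch.add(Id, A, alpha=0.5))
    assert torch.allclose(Q.mT @ Q, Id, atol=1e-10)
    return Q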
def orthogonal(module: Module,
name: str = 'weight',
orthogonal_map: Optional[str] = None,
*,
use_trivialization: bool = True) -> Module:
r"""Applies an orthogonal or unitary parametrization to a matrix or a batch of matrices.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, the parametrized
matrix :math:`Q \in \mathbb{K}^{m \times n}` is **orthogonal** as
.. math::
\begin{align*}
Q^{\text{H}}Q &= \mathrm{I}_n \mathrlap{\qquad \text{if }m \geq n}\\
QQ^{\text{H}} &= \mathrm{I}_m \mathrlap{\qquad \text{if }m < n}
\end{align*}
where :math:`Q^{\text{H}}` is the conjugate transpose when :math:`Q` is complex
and the transpose when :math:`Q` is real-valued, and
:math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
In plain words, :math:`Q` will have orthonormal columns whenever :math:`m \geq n`
and orthonormal rows otherwise.
If the tensor has more than two dimensions, we consider it as a batch of matrices of shape `(..., m, n)`.
The matrix :math:`Q` may be parametrized via three different ``orthogonal_map`` in terms of the original tensor:
- ``"matrix_exp"``/``"cayley"``:
the :func:`~torch.matrix_exp` :math:`Q = \exp(A)` and the `Cayley map`_
:math:`Q = (\mathrm{I}_n + A/2)(\mathrm{I}_n - A/2)^{-1}` are applied to a skew-symmetric
:math:`A` to give an orthogonal matrix.
- ``"householder"``: computes a product of Householder reflectors
(:func:`~torch.linalg.householder_product`).
``"matrix_exp"``/``"cayley"`` often make the parametrized weight converge faster than
``"householder"``, but they are slower to compute for very thin or very wide matrices.
If ``use_trivialization=True`` (default), the parametrization implements the "Dynamic Trivialization Framework",
where an extra matrix :math:`B \in \mathbb{K}^{n \times n}` is stored under
``module.parametrizations.weight[0].base``. This helps the
convergence of the parametrized layer at the expense of some extra memory use.
See `Trivializations for Gradient-Based Optimization on Manifolds`_ .
Initial value of :math:`Q`:
If the original tensor is not parametrized and ``use_trivialization=True`` (default), the initial value
of :math:`Q` is that of the original tensor if it is orthogonal (or unitary in the complex case)
and it is orthogonalized via the QR decomposition otherwise (see :func:`torch.linalg.qr`).
Same happens when it is not parametrized and ``orthogonal_map="householder"`` even when ``use_trivialization=False``.
Otherwise, the initial value is the result of the composition of all the registered
parametrizations applied to the original tensor.
.. note::
This function is implemented using the parametrization functionality
in :func:`~torch.nn.utils.parametrize.register_parametrization`.
.. _`Cayley map`: https://en.wikipedia.org/wiki/Cayley_transform#Matrix_map
.. _`Trivializations for Gradient-Based Optimization on Manifolds`: https://arxiv.org/abs/1909.09501
Args:
module (nn.Module): module on which to register the parametrization.
name (str, optional): name of the tensor to make orthogonal. Default: ``"weight"``.
orthogonal_map (str, optional): One of the following: ``"matrix_exp"``, ``"cayley"``, ``"householder"``.
Default: ``"matrix_exp"`` if the matrix is square or complex, ``"householder"`` otherwise.
use_trivialization (bool, optional): whether to use the dynamic trivialization framework.
Default: ``True``.
Returns:
The original module with an orthogonal parametrization registered to the specified
weight
Example::
>>> # xdoctest: +REQUIRES(--lapack)
>>> orth_linear = orthogonal(nn.Linear(20, 40))
>>> orth_linear
ParametrizedLinear(
in_features=20, out_features=40, bias=True
(parametrizations): ModuleDict(
(weight): ParametrizationList(
(0): _Orthogonal()
)
)
)
>>> # xdoctest: +IGNORE_WANT
>>> Q = orth_linear.weight
>>> torch.dist(Q.T @ Q, torch.eye(20))
tensor(4.9332e-07)
"""
weight = getattr(module, name, None)
if not isinstance(weight, Tensor):
raise ValueError(
"Module '{}' has no parameter or buffer with name '{}'".format(module, name)
)
# We could implement this for 1-dim tensors as the maps on the sphere
# but I believe it'd bite more people than it'd help
if weight.ndim < 2:
raise ValueError("Expected a matrix or batch of matrices. "
f"Got a tensor of {weight.ndim} dimensions.")
if orthogonal_map is None:
orthogonal_map = "matrix_exp" if weight.size(-2) == weight.size(-1) or weight.is_complex() else "householder"
orth_enum = getattr(_OrthMaps, orthogonal_map, None)
if orth_enum is None:
raise ValueError('orthogonal_map has to be one of "matrix_exp", "cayley", "householder". '
f'Got: {orthogonal_map}')
orth = _Orthogonal(weight,
orth_enum,
use_trivialization=use_trivialization)
parametrize.register_parametrization(module, name, orth, unsafe=True)
return module
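# Hypothetical illustration (not part of the original file): a minimal sketch of
# the Cayley map referenced in the docstring above, Q = (I + A/2)(I - A/2)^{-1},
# which sends a skew-symmetric A to an orthogonal Q. Assumes a real square matrix.
_X = torch.randn(5, 5)
_A = _X - _X.mT                                      # skew-symmetric: _A.mT == -_A
_Id = torch.eye(5)
_Q = (_Id + _A / 2) @ torch.linalg.inv(_Id - _A / 2)
# torch.allclose(_Q.mT @ _Q, _Id, atol=1e-4)  -> True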
class _SpectralNorm(Module):
def __init__(
self,
weight: torch.Tensor,
n_power_iterations: int = 1,
dim: int = 0,
eps: float = 1e-12
) -> None:
super().__init__()
ndim = weight.ndim
if dim >= ndim or dim < -ndim:
raise IndexError("Dimension out of range (expected to be in range of "
f"[-{ndim}, {ndim - 1}] but got {dim})")
if n_power_iterations <= 0:
raise ValueError('Expected n_power_iterations to be positive, but '
'got n_power_iterations={}'.format(n_power_iterations))
self.dim = dim if dim >= 0 else dim + ndim
self.eps = eps
if ndim > 1:
# For ndim == 1 we do not need to approximate anything (see _SpectralNorm.forward)
self.n_power_iterations = n_power_iterations
weight_mat = self._reshape_weight_to_matrix(weight)
h, w = weight_mat.size()
u = weight_mat.new_empty(h).normal_(0, 1)
v = weight_mat.new_empty(w).normal_(0, 1)
self.register_buffer('_u', F.normalize(u, dim=0, eps=self.eps))
self.register_buffer('_v', F.normalize(v, dim=0, eps=self.eps))
# Start with u, v initialized to some reasonable values by performing a number
# of iterations of the power method
self._power_method(weight_mat, 15)
def _reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor:
# Precondition
assert weight.ndim > 1
if self.dim != 0:
# permute dim to front
weight = weight.permute(self.dim, *(d for d in range(weight.dim()) if d != self.dim))
return weight.flatten(1)
@torch.autograd.no_grad()
def _power_method(self, weight_mat: torch.Tensor, n_power_iterations: int) -> None:
# See original note at torch/nn/utils/spectral_norm.py
# NB: If `do_power_iteration` is set, the `u` and `v` vectors are
# updated in power iteration **in-place**. This is very important
# because in `DataParallel` forward, the vectors (being buffers) are
# broadcast from the parallelized module to each module replica,
# which is a new module object created on the fly. And each replica
# runs its own spectral norm power iteration. So simply assigning
# the updated vectors to the module this function runs on will cause
# the update to be lost forever. And the next time the parallelized
# module is replicated, the same randomly initialized vectors are
# broadcast and used!
#
# Therefore, to make the change propagate back, we rely on two
# important behaviors (also enforced via tests):
# 1. `DataParallel` doesn't clone storage if the broadcast tensor
# is already on correct device; and it makes sure that the
# parallelized module is already on `device[0]`.
# 2. If the out tensor in `out=` kwarg has correct shape, it will
# just fill in the values.
# Therefore, since the same power iteration is performed on all
# devices, simply updating the tensors in-place will make sure that
# the module replica on `device[0]` will update the _u vector on the
# parallelized module (by shared storage).
#
# However, after we update `u` and `v` in-place, we need to **clone**
# them before using them to normalize the weight. This is to support
# backproping through two forward passes, e.g., the common pattern in
# GAN training: loss = D(real) - D(fake). Otherwise, engine will
# complain that variables needed to do backward for the first forward
# (i.e., the `u` and `v` vectors) are changed in the second forward.
# Precondition
assert weight_mat.ndim > 1
for _ in range(n_power_iterations):
# Spectral norm of weight equals `u^T W v`, where `u` and `v`
# are the first left and right singular vectors.
# This power iteration produces approximations of `u` and `v`.
self._u = F.normalize(torch.mv(weight_mat, self._v), # type: ignore[has-type]
dim=0, eps=self.eps, out=self._u) # type: ignore[has-type]
self._v = F.normalize(torch.mv(weight_mat.t(), self._u),
dim=0, eps=self.eps, out=self._v) # type: ignore[has-type]
def forward(self, weight: torch.Tensor) -> torch.Tensor:
if weight.ndim == 1:
# Faster and more exact path, no need to approximate anything
return F.normalize(weight, dim=0, eps=self.eps)
else:
weight_mat = self._reshape_weight_to_matrix(weight)
if self.training:
self._power_method(weight_mat, self.n_power_iterations)
# See above on why we need to clone
u = self._u.clone(memory_format=torch.contiguous_format)
v = self._v.clone(memory_format=torch.contiguous_format)
# The proper way of computing this should be through F.bilinear, but
# it seems to have some efficiency issues:
# https://github.com/pytorch/pytorch/issues/58093
sigma = torch.dot(u, torch.mv(weight_mat, v))
return weight / sigma
def right_inverse(self, value: torch.Tensor) -> torch.Tensor:
# we may want to assert here that the passed value already
# satisfies constraints
return value
def spectral_norm(module: Module,
name: str = 'weight',
n_power_iterations: int = 1,
eps: float = 1e-12,
dim: Optional[int] = None) -> Module:
r"""Applies spectral normalization to a parameter in the given module.
.. math::
\mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})},
\sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}
When applied on a vector, it simplifies to
.. math::
\mathbf{x}_{SN} = \dfrac{\mathbf{x}}{\|\mathbf{x}\|_2}
Spectral normalization stabilizes the training of discriminators (critics)
in Generative Adversarial Networks (GANs) by reducing the Lipschitz constant
of the model. :math:`\sigma` is approximated by performing one iteration of the
`power method`_ every time the weight is accessed. If the weight tensor has
more than 2 dimensions, it is reshaped to 2D for the power iteration.
See `Spectral Normalization for Generative Adversarial Networks`_ .
.. _`power method`: https://en.wikipedia.org/wiki/Power_iteration
.. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957
.. note::
This function is implemented using the parametrization functionality
in :func:`~torch.nn.utils.parametrize.register_parametrization`. It is a
reimplementation of :func:`torch.nn.utils.spectral_norm`.
.. note::
When this constraint is registered, the singular vectors associated with the largest
singular value are estimated rather than sampled at random. They are then updated by
performing :attr:`n_power_iterations` iterations of the `power method`_ whenever the
tensor is accessed while the module is in `training` mode.
.. note::
If the `_SpectralNorm` module, i.e., `module.parametrizations.weight[idx]`,
is in training mode on removal, it will perform another power iteration.
If you'd like to avoid this iteration, set the module to eval mode
before its removal.
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter. Default: ``"weight"``.
n_power_iterations (int, optional): number of power iterations to
calculate spectral norm. Default: ``1``.
eps (float, optional): epsilon for numerical stability in
calculating norms. Default: ``1e-12``.
dim (int, optional): dimension corresponding to number of outputs.
Default: ``0``, except for modules that are instances of
ConvTranspose{1,2,3}d, when it is ``1``
Returns:
The original module with a new parametrization registered to the specified
weight
Example::
>>> # xdoctest: +REQUIRES(--lapack)
>>> snm = spectral_norm(nn.Linear(20, 40))
>>> snm
ParametrizedLinear(
in_features=20, out_features=40, bias=True
(parametrizations): ModuleDict(
(weight): ParametrizationList(
(0): _SpectralNorm()
)
)
)
>>> torch.linalg.matrix_norm(snm.weight, 2)
tensor(1.0000, grad_fn=<CopyBackwards>)
"""
weight = getattr(module, name, None)
if not isinstance(weight, Tensor):
raise ValueError(
"Module '{}' has no parameter or buffer with name '{}'".format(module, name)
)
if dim is None:
if isinstance(module, (torch.nn.ConvTranspose1d,
torch.nn.ConvTranspose2d,
torch.nn.ConvTranspose3d)):
dim = 1
else:
dim = 0
parametrize.register_parametrization(module, name, _SpectralNorm(weight, n_power_iterations, dim, eps))
return module
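# Hypothetical illustration (not part of the original file): the power iteration
# performed by _SpectralNorm above, written out against torch.linalg for
# comparison. A sketch assuming a plain 2D weight.
_W = torch.randn(40, 20)
_u = F.normalize(torch.randn(40), dim=0)
_v = F.normalize(torch.randn(20), dim=0)
for _ in range(50):
    _u = F.normalize(torch.mv(_W, _v), dim=0)
    _v = F.normalize(torch.mv(_W.t(), _u), dim=0)
_sigma = torch.dot(_u, torch.mv(_W, _v))
# _sigma should approximate torch.linalg.matrix_norm(_W, 2), the largest
# singular value of _W.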
|
pytorch-master
|
torch/nn/utils/parametrizations.py
|
import inspect
import torch
def skip_init(module_cls, *args, **kwargs):
r"""
Given a module class object and args / kwargs, instantiates the module without initializing
parameters / buffers. This can be useful if initialization is slow or if custom initialization will
be performed, making the default initialization unnecessary. There are some caveats to this, due to
the way this function is implemented:
1. The module must accept a `device` arg in its constructor that is passed to any parameters
or buffers created during construction.
2. The module must not perform any computation on parameters in its constructor except
initialization (i.e. functions from :mod:`torch.nn.init`).
If these conditions are satisfied, the module can be instantiated with parameter / buffer values
uninitialized, as if having been created using :func:`torch.empty`.
Args:
module_cls: Class object; should be a subclass of :class:`torch.nn.Module`
args: args to pass to the module's constructor
kwargs: kwargs to pass to the module's constructor
Returns:
Instantiated module with uninitialized parameters / buffers
Example::
>>> import torch
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = torch.nn.utils.skip_init(torch.nn.Linear, 5, 1)
>>> m.weight
Parameter containing:
tensor([[0.0000e+00, 1.5846e+29, 7.8307e+00, 2.5250e-29, 1.1210e-44]],
requires_grad=True)
>>> m2 = torch.nn.utils.skip_init(torch.nn.Linear, in_features=6, out_features=1)
>>> m2.weight
Parameter containing:
tensor([[-1.4677e+24, 4.5915e-41, 1.4013e-45, 0.0000e+00, -1.4677e+24,
4.5915e-41]], requires_grad=True)
"""
if not issubclass(module_cls, torch.nn.Module):
raise RuntimeError('Expected a Module; got {}'.format(module_cls))
if 'device' not in inspect.signature(module_cls).parameters:
raise RuntimeError('Module must support a \'device\' arg to skip initialization')
final_device = kwargs.pop('device', 'cpu')
kwargs['device'] = 'meta'
return module_cls(*args, **kwargs).to_empty(device=final_device)
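# Hypothetical illustration (not part of the original file): the intended usage
# pattern, where the default initialization is skipped and a custom one is
# applied afterwards.
_m = skip_init(torch.nn.Linear, 128, 64)
torch.nn.init.orthogonal_(_m.weight)   # custom initialization replaces the default
torch.nn.init.zeros_(_m.bias)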
|
pytorch-master
|
torch/nn/utils/init.py
|
from collections import namedtuple
import warnings
import torch
from torch import Tensor
from ... import _VF
from ..._jit_internal import Optional
from typing import List, Tuple, Union, Iterable
__all__ = ['PackedSequence', 'invert_permutation', 'pack_padded_sequence', 'pad_packed_sequence', 'pad_sequence',
'unpad_sequence', 'pack_sequence', 'unpack_sequence']
PackedSequence_ = namedtuple('PackedSequence_',
['data', 'batch_sizes', 'sorted_indices', 'unsorted_indices'])
# type annotation for PackedSequence_ to make it compatible with TorchScript
PackedSequence_.__annotations__ = {'data': torch.Tensor, 'batch_sizes': torch.Tensor,
'sorted_indices': Optional[torch.Tensor],
'unsorted_indices': Optional[torch.Tensor]}
def bind(optional, fn):
if optional is None:
return None
return fn(optional)
class PackedSequence(PackedSequence_):
r"""Holds the data and list of :attr:`batch_sizes` of a packed sequence.
All RNN modules accept packed sequences as inputs.
Note:
Instances of this class should never be created manually. They are meant
to be instantiated by functions like :func:`pack_padded_sequence`.
Batch sizes represent the number of elements at each sequence step in
the batch, not the varying sequence lengths passed to
:func:`pack_padded_sequence`. For instance, given data ``abc`` and ``x``
the :class:`PackedSequence` would contain data ``axbc`` with
``batch_sizes=[2,1,1]``.
Attributes:
data (Tensor): Tensor containing packed sequence
batch_sizes (Tensor): Tensor of integers holding
information about the batch size at each sequence step
sorted_indices (Tensor, optional): Tensor of integers holding how this
:class:`PackedSequence` is constructed from sequences.
unsorted_indices (Tensor, optional): Tensor of integers holding how to
recover the original sequences in their correct order.
.. note::
:attr:`data` can be on arbitrary device and of arbitrary dtype.
:attr:`sorted_indices` and :attr:`unsorted_indices` must be ``torch.int64``
tensors on the same device as :attr:`data`.
However, :attr:`batch_sizes` should always be a CPU ``torch.int64`` tensor.
This invariant is maintained throughout the :class:`PackedSequence` class,
and by all functions that construct a :class:`PackedSequence` in PyTorch
(i.e., they only pass in tensors conforming to this constraint).
"""
def __new__(cls, data, batch_sizes=None, sorted_indices=None, unsorted_indices=None):
return super(PackedSequence, cls).__new__(
cls,
*_packed_sequence_init_args(data, batch_sizes, sorted_indices,
unsorted_indices))
# NOTE [ device and dtype of a PackedSequence ]
#
# See the note above in doc string (starting with ":attr:`data` can be on
# arbitrary device...").
def pin_memory(self):
# Why not convert `batch_sizes`?
# See NOTE [ device and dtype of a PackedSequence ]
return type(self)(self.data.pin_memory(), self.batch_sizes,
bind(self.sorted_indices, lambda t: t.pin_memory()),
bind(self.unsorted_indices, lambda t: t.pin_memory()))
def cuda(self, *args, **kwargs):
# Tests to see if 'cuda' should be added to kwargs
ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)
if ex.is_cuda:
return self.to(*args, **kwargs)
return self.to(*args, device='cuda', **kwargs)
def cpu(self, *args, **kwargs):
ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)
if ex.device.type == 'cpu':
return self.to(*args, **kwargs)
return self.to(*args, device='cpu', **kwargs)
def double(self):
return self.to(dtype=torch.double)
def float(self):
return self.to(dtype=torch.float)
def half(self):
return self.to(dtype=torch.half)
def long(self):
return self.to(dtype=torch.long)
def int(self):
return self.to(dtype=torch.int)
def short(self):
return self.to(dtype=torch.short)
def char(self):
return self.to(dtype=torch.int8)
def byte(self):
return self.to(dtype=torch.uint8)
def to(self, *args, **kwargs):
r"""Performs dtype and/or device conversion on `self.data`.
It has a similar signature to :meth:`torch.Tensor.to`, except that optional
arguments like `non_blocking` and `copy` should be passed as kwargs,
not args, or they will not apply to the index tensors.
.. note::
If the ``self.data`` Tensor already has the correct :class:`torch.dtype`
and :class:`torch.device`, then ``self`` is returned.
Otherwise, returns a copy with the desired configuration.
"""
# Why not convert `batch_sizes`?
# See NOTE [ device and dtype of a PackedSequence ]
data = self.data.to(*args, **kwargs)
if data is self.data:
return self
else:
# Does not forward device or dtype arg/kwargs, device is set from data.device
kwargs = {k : v for k, v in filter(lambda t: t[0] != 'device' and t[0] != 'dtype', kwargs.items())}
sorted_indices = bind(self.sorted_indices, lambda t: t.to(data.device, **kwargs))
unsorted_indices = bind(self.unsorted_indices, lambda t: t.to(data.device, **kwargs))
return type(self)(data, self.batch_sizes, sorted_indices, unsorted_indices)
@property
def is_cuda(self):
r"""Returns true if `self.data` stored on a gpu"""
return self.data.is_cuda
def is_pinned(self):
r"""Returns true if `self.data` stored on in pinned memory"""
return self.data.is_pinned()
# TorchScript doesn't support constructors on named tuples, so we use this helper
# method to construct PackedSequence
def _packed_sequence_init_args(
data: Tensor,
batch_sizes: Optional[Tensor] = None,
sorted_indices: Optional[Tensor] = None,
unsorted_indices: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]:
# NB: if unsorted_indices is provided, it should be the inverse permutation
# to sorted_indices. Don't assert it here because the PackedSequence ctor
# should only be used internally.
if unsorted_indices is None:
unsorted_indices = invert_permutation(sorted_indices)
# support being called as `PackedSequence(data, batch_sizes, sorted_indices)`
if batch_sizes is not None:
# TODO: Re-enable this check (.type isn't supported in TorchScript)
if batch_sizes.device.type != 'cpu':
raise ValueError(
"batch_sizes should always be on CPU. "
"Instances of PackedSequence should never be created manually. "
"They should be instantiated by functions like pack_sequence "
"and pack_padded_sequences in nn.utils.rnn. "
"https://pytorch.org/docs/stable/nn.html#torch.nn.utils.rnn.pack_sequence")
return data, batch_sizes, sorted_indices, unsorted_indices
# support being called as `PackedSequence((data, batch_sizes), *, sorted_indices)`
else:
assert isinstance(data, (list, tuple)) and len(data) == 2
return data[0], data[1], sorted_indices, unsorted_indices
def _packed_sequence_init(
data: Tensor,
batch_sizes: Optional[Tensor] = None,
sorted_indices: Optional[Tensor] = None,
unsorted_indices: Optional[Tensor] = None,
) -> PackedSequence:
data, batch_sizes, sorted_indices, unsorted_indices = _packed_sequence_init_args(
data, batch_sizes, sorted_indices, unsorted_indices)
return PackedSequence(data, batch_sizes, sorted_indices, unsorted_indices)
def invert_permutation(permutation: Optional[Tensor]) -> Optional[Tensor]:
if permutation is None:
return None
output = torch.empty_like(permutation, memory_format=torch.legacy_contiguous_format)
output.scatter_(0, permutation,
torch.arange(0, permutation.numel(), device=permutation.device))
return output
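# Hypothetical illustration (not part of the original file): composing a
# permutation with its inverse from invert_permutation recovers the identity.
_perm = torch.tensor([2, 0, 1])
_inv = invert_permutation(_perm)       # tensor([1, 2, 0])
# _perm[_inv] -> tensor([0, 1, 2])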
def pack_padded_sequence(
input: Tensor,
lengths: Tensor,
batch_first: bool = False,
enforce_sorted: bool = True,
) -> PackedSequence:
r"""Packs a Tensor containing padded sequences of variable length.
:attr:`input` can be of size ``T x B x *`` where `T` is the length of the
longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and
``*`` is any number of dimensions (including 0). If ``batch_first`` is
``True``, ``B x T x *`` :attr:`input` is expected.
For unsorted sequences, use `enforce_sorted = False`. If :attr:`enforce_sorted` is
``True``, the sequences should be sorted by length in a decreasing order, i.e.
``input[:,0]`` should be the longest sequence, and ``input[:,B-1]`` the shortest
one. `enforce_sorted = True` is only necessary for ONNX export.
Note:
This function accepts any input that has at least two dimensions. You
can apply it to pack the labels, and use the output of the RNN with
them to compute the loss directly. A Tensor can be retrieved from
a :class:`PackedSequence` object by accessing its ``.data`` attribute.
Args:
input (Tensor): padded batch of variable length sequences.
lengths (Tensor or list(int)): list of sequence lengths of each batch
element (must be on the CPU if provided as a tensor).
batch_first (bool, optional): if ``True``, the input is expected in ``B x T x *``
format.
enforce_sorted (bool, optional): if ``True``, the input is expected to
contain sequences sorted by length in a decreasing order. If
``False``, the input will get sorted unconditionally. Default: ``True``.
Returns:
a :class:`PackedSequence` object
"""
if torch._C._get_tracing_state() and not isinstance(lengths, torch.Tensor):
warnings.warn('pack_padded_sequence has been called with a Python list of '
'sequence lengths. The tracer cannot track the data flow of Python '
'values, and it will treat them as constants, likely rendering '
'the trace incorrect for any other combination of lengths.',
stacklevel=2)
lengths = torch.as_tensor(lengths, dtype=torch.int64)
if enforce_sorted:
sorted_indices = None
else:
lengths, sorted_indices = torch.sort(lengths, descending=True)
sorted_indices = sorted_indices.to(input.device)
batch_dim = 0 if batch_first else 1
input = input.index_select(batch_dim, sorted_indices)
data, batch_sizes = \
_VF._pack_padded_sequence(input, lengths, batch_first)
return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
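# Hypothetical illustration (not part of the original file): the batch_sizes
# layout described in the PackedSequence docstring. For sequences "abc" and "x"
# the packed data interleaves time steps as a, x, b, c with batch_sizes [2, 1, 1].
_padded = torch.tensor([[1, 9],
                        [2, 0],
                        [3, 0]])       # T x B, sequences [1, 2, 3] and [9]
_packed = pack_padded_sequence(_padded, lengths=torch.tensor([3, 1]))
# _packed.data        -> tensor([1, 9, 2, 3])
# _packed.batch_sizes -> tensor([2, 1, 1])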
def pad_packed_sequence(
sequence: PackedSequence,
batch_first: bool = False,
padding_value: float = 0.0,
total_length: Optional[int] = None,
) -> Tuple[Tensor, Tensor]:
r"""Pads a packed batch of variable length sequences.
It is an inverse operation to :func:`pack_padded_sequence`.
The returned Tensor's data will be of size ``T x B x *``, where `T` is the length
of the longest sequence and `B` is the batch size. If ``batch_first`` is True,
the data will be transposed into ``B x T x *`` format.
Example:
>>> from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
>>> seq = torch.tensor([[1,2,0], [3,0,0], [4,5,6]])
>>> lens = [2, 1, 3]
>>> packed = pack_padded_sequence(seq, lens, batch_first=True, enforce_sorted=False)
>>> packed
PackedSequence(data=tensor([4, 1, 3, 5, 2, 6]), batch_sizes=tensor([3, 2, 1]),
sorted_indices=tensor([2, 0, 1]), unsorted_indices=tensor([1, 2, 0]))
>>> seq_unpacked, lens_unpacked = pad_packed_sequence(packed, batch_first=True)
>>> seq_unpacked
tensor([[1, 2, 0],
[3, 0, 0],
[4, 5, 6]])
>>> lens_unpacked
tensor([2, 1, 3])
.. note::
:attr:`total_length` is useful to implement the
``pack sequence -> recurrent network -> unpack sequence`` pattern in a
:class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
See :ref:`this FAQ section <pack-rnn-unpack-with-data-parallelism>` for
details.
Args:
sequence (PackedSequence): batch to pad
batch_first (bool, optional): if ``True``, the output will be in ``B x T x *``
format.
padding_value (float, optional): values for padded elements.
total_length (int, optional): if not ``None``, the output will be padded to
have length :attr:`total_length`. This method will throw :class:`ValueError`
if :attr:`total_length` is less than the max sequence length in
:attr:`sequence`.
Returns:
Tuple of Tensor containing the padded sequence, and a Tensor
containing the list of lengths of each sequence in the batch.
Batch elements will be re-ordered as they were ordered originally when
the batch was passed to ``pack_padded_sequence`` or ``pack_sequence``.
"""
max_seq_length = sequence.batch_sizes.size(0)
if total_length is not None:
if total_length < max_seq_length:
raise ValueError("Expected total_length to be at least the length "
"of the longest sequence in input, but got "
"total_length={} and max sequence length being {}"
.format(total_length, max_seq_length))
max_seq_length = total_length
padded_output, lengths = _VF._pad_packed_sequence(
sequence.data, sequence.batch_sizes, batch_first, padding_value, max_seq_length)
unsorted_indices = sequence.unsorted_indices
if unsorted_indices is not None:
batch_dim = 0 if batch_first else 1
return padded_output.index_select(batch_dim, unsorted_indices), lengths[unsorted_indices.cpu()]
return padded_output, lengths
def pad_sequence(
sequences: Union[Tensor, List[Tensor]],
batch_first: bool = False,
padding_value: float = 0.0,
) -> Tensor:
r"""Pad a list of variable length Tensors with ``padding_value``
``pad_sequence`` stacks a list of Tensors along a new dimension,
and pads them to equal length. For example, if the input is a list of
sequences with size ``L x *``, the output is of size ``T x B x *`` if
``batch_first`` is ``False``, and ``B x T x *`` otherwise.
`B` is the batch size. It is equal to the number of elements in ``sequences``.
`T` is the length of the longest sequence.
`L` is the length of the sequence.
`*` is any number of trailing dimensions, including none.
Example:
>>> from torch.nn.utils.rnn import pad_sequence
>>> a = torch.ones(25, 300)
>>> b = torch.ones(22, 300)
>>> c = torch.ones(15, 300)
>>> pad_sequence([a, b, c]).size()
torch.Size([25, 3, 300])
Note:
This function returns a Tensor of size ``T x B x *`` or ``B x T x *``
where `T` is the length of the longest sequence. This function assumes
trailing dimensions and type of all the Tensors in sequences are same.
Args:
sequences (list[Tensor]): list of variable length sequences.
batch_first (bool, optional): output will be in ``B x T x *`` if True, or in
``T x B x *`` otherwise. Default: False.
padding_value (float, optional): value for padded elements. Default: 0.
Returns:
Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``.
Tensor of size ``B x T x *`` otherwise
"""
if not (torch.jit.is_tracing() or torch.jit.is_scripting()):
# JIT doesn't support `Iterable`
if not isinstance(sequences, Iterable):
msg = ('pad_sequence: Expected iterable for input sequences, but got arg of type: '
f'{type(sequences)}')
raise RuntimeError(msg)
# In JIT context this leads to,
# RuntimeError: cannot statically infer the expected size of a list in this context
sequences = tuple(sequences)
else:
# For JIT, we only support Union[Tensor, Tuple[Tensor]]
if isinstance(sequences, torch.Tensor):
sequences = sequences.unbind(0)
# assuming trailing dimensions and type of all the Tensors
# in sequences are same and fetching those from sequences[0]
return torch._C._nn.pad_sequence(sequences, batch_first, padding_value)
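# Hypothetical illustration (not part of the original file): the batch_first and
# padding_value variants of the docstring example above.
_a, _b, _c = torch.ones(25, 300), torch.ones(22, 300), torch.ones(15, 300)
_out = pad_sequence([_a, _b, _c], batch_first=True, padding_value=-1.0)
# _out.size() -> torch.Size([3, 25, 300]); entries of _b and _c beyond their
# lengths are filled with -1.0.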
def unpad_sequence(
padded_sequences: Tensor,
lengths: Tensor,
batch_first: bool = False,
) -> List[Tensor]:
r"""Unpad padded Tensor into a list of variable length Tensors
``unpad_sequence`` unstacks padded Tensor into a list of variable length Tensors.
Example:
>>> from torch.nn.utils.rnn import pad_sequence, unpad_sequence
>>> a = torch.ones(25, 300)
>>> b = torch.ones(22, 300)
>>> c = torch.ones(15, 300)
>>> sequences = [a, b, c]
>>> padded_sequences = pad_sequence(sequences)
>>> lengths = torch.as_tensor([v.size(0) for v in sequences])
>>> unpadded_sequences = unpad_sequence(padded_sequences, lengths)
>>> torch.allclose(sequences[0], unpadded_sequences[0])
True
>>> torch.allclose(sequences[1], unpadded_sequences[1])
True
>>> torch.allclose(sequences[2], unpadded_sequences[2])
True
Args:
padded_sequences (Tensor): padded sequences.
lengths (Tensor): length of original (unpadded) sequences.
batch_first (bool, optional): whether batch dimension first or not. Default: False.
Returns:
a list of :class:`Tensor` objects
"""
unpadded_sequences = []
if not batch_first:
padded_sequences.transpose_(0, 1)
max_length = padded_sequences.shape[1]
idx = torch.arange(max_length)
for seq, length in zip(padded_sequences, lengths):
mask = idx < length
unpacked_seq = seq[mask]
unpadded_sequences.append(unpacked_seq)
return unpadded_sequences
def pack_sequence(sequences: List[Tensor], enforce_sorted: bool = True) -> PackedSequence:
r"""Packs a list of variable length Tensors
Equivalent to calling ``pad_sequence`` followed by ``pack_padded_sequence``.
``sequences`` should be a list of Tensors of size ``L x *``, where `L` is
the length of a sequence and `*` is any number of trailing dimensions,
including zero.
For unsorted sequences, use `enforce_sorted = False`. If ``enforce_sorted``
is ``True``, the sequences should be sorted in the order of decreasing length.
``enforce_sorted = True`` is only necessary for ONNX export.
Example:
>>> from torch.nn.utils.rnn import pack_sequence
>>> a = torch.tensor([1,2,3])
>>> b = torch.tensor([4,5])
>>> c = torch.tensor([6])
>>> pack_sequence([a, b, c])
PackedSequence(data=tensor([1, 4, 6, 2, 5, 3]), batch_sizes=tensor([3, 2, 1]), sorted_indices=None, unsorted_indices=None)
Args:
sequences (list[Tensor]): A list of sequences of decreasing length.
enforce_sorted (bool, optional): if ``True``, checks that the input
contains sequences sorted by length in a decreasing order. If
``False``, this condition is not checked. Default: ``True``.
Returns:
a :class:`PackedSequence` object
"""
lengths = torch.as_tensor([v.size(0) for v in sequences])
return pack_padded_sequence(pad_sequence(sequences), lengths, enforce_sorted=enforce_sorted)
def unpack_sequence(packed_sequences: PackedSequence) -> List[Tensor]:
r"""Unpacks PackedSequence into a list of variable length Tensors
``packed_sequences`` should be a PackedSequence object.
Example:
>>> from torch.nn.utils.rnn import pack_sequence, unpack_sequence
>>> a = torch.tensor([1,2,3])
>>> b = torch.tensor([4,5])
>>> c = torch.tensor([6])
>>> sequences = [a, b, c]
>>> print(sequences)
[tensor([1, 2, 3]), tensor([4, 5]), tensor([6])]
>>> packed_sequences = pack_sequence(sequences)
>>> print(packed_sequences)
PackedSequence(data=tensor([1, 4, 6, 2, 5, 3]), batch_sizes=tensor([3, 2, 1]), sorted_indices=None, unsorted_indices=None)
>>> unpacked_sequences = unpack_sequence(packed_sequences)
>>> print(unpacked_sequences)
[tensor([1, 2, 3]), tensor([4, 5]), tensor([6])]
Args:
packed_sequences (PackedSequence): A PackedSequence object.
Returns:
a list of :class:`Tensor` objects
"""
padded_sequences, lengths = pad_packed_sequence(packed_sequences, batch_first=True)
unpacked_sequences = unpad_sequence(padded_sequences, lengths, batch_first=True)
return unpacked_sequences
|
pytorch-master
|
torch/nn/utils/rnn.py
|
import warnings
import torch
from torch._six import inf
from typing import Union, Iterable
_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]]
__all__ = ['clip_grad_norm_', 'clip_grad_norm', 'clip_grad_value_']
def clip_grad_norm_(
parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0,
error_if_nonfinite: bool = False) -> torch.Tensor:
r"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Args:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
error_if_nonfinite (bool): if True, an error is thrown if the total
norm of the gradients from :attr:`parameters` is ``nan``,
``inf``, or ``-inf``. Default: False (will switch to True in the future)
Returns:
Total norm of the parameter gradients (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
grads = [p.grad for p in parameters if p.grad is not None]
max_norm = float(max_norm)
norm_type = float(norm_type)
if len(grads) == 0:
return torch.tensor(0.)
device = grads[0].device
if norm_type == inf:
norms = [g.detach().abs().max().to(device) for g in grads]
total_norm = norms[0] if len(norms) == 1 else torch.max(torch.stack(norms))
else:
total_norm = torch.norm(torch.stack([torch.norm(g.detach(), norm_type).to(device) for g in grads]), norm_type)
if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()):
raise RuntimeError(
f'The total norm of order {norm_type} for gradients from '
'`parameters` is non-finite, so it cannot be clipped. To disable '
'this error and scale the gradients by the non-finite norm anyway, '
'set `error_if_nonfinite=False`')
clip_coef = max_norm / (total_norm + 1e-6)
# Note: multiplying by the clamped coef is redundant when the coef is clamped to 1, but doing so
# avoids a `if clip_coef < 1:` conditional which can require a CPU <=> device synchronization
# when the gradients do not reside in CPU memory.
clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
for g in grads:
g.detach().mul_(clip_coef_clamped.to(g.device))
return total_norm
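# Hypothetical illustration (not part of the original file): after clip_grad_norm_
# the total gradient norm is at most max_norm (up to the small 1e-6 slack in
# clip_coef above), while the returned value is the pre-clipping norm.
_model = torch.nn.Linear(10, 10)
_model(torch.randn(4, 10)).sum().backward()
_total = clip_grad_norm_(_model.parameters(), max_norm=1.0)
_after = torch.norm(torch.stack([p.grad.norm() for p in _model.parameters()]))
# _after <= 1.0 (approximately); _total is the norm before clipping.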
def clip_grad_norm(
parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.,
error_if_nonfinite: bool = False) -> torch.Tensor:
r"""Clips gradient norm of an iterable of parameters.
.. warning::
This method is now deprecated in favor of
:func:`torch.nn.utils.clip_grad_norm_`.
"""
warnings.warn("torch.nn.utils.clip_grad_norm is now deprecated in favor "
"of torch.nn.utils.clip_grad_norm_.", stacklevel=2)
return clip_grad_norm_(parameters, max_norm, norm_type, error_if_nonfinite)
def clip_grad_value_(parameters: _tensor_or_tensors, clip_value: float) -> None:
r"""Clips gradient of an iterable of parameters at specified value.
Gradients are modified in-place.
Args:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients clipped
clip_value (float or int): maximum allowed value of the gradients.
The gradients are clipped in the range
:math:`\left[\text{-clip\_value}, \text{clip\_value}\right]`
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
clip_value = float(clip_value)
for p in filter(lambda p: p.grad is not None, parameters):
p.grad.data.clamp_(min=-clip_value, max=clip_value)
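# Hypothetical illustration (not part of the original file): clip_grad_value_
# clamps each gradient entry elementwise into [-clip_value, clip_value].
_p = torch.nn.Parameter(torch.zeros(3))
_p.grad = torch.tensor([-2.0, 0.5, 3.0])
clip_grad_value_([_p], clip_value=1.0)
# _p.grad -> tensor([-1.0000, 0.5000, 1.0000])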
|
pytorch-master
|
torch/nn/utils/clip_grad.py
|
from functools import reduce
import operator
import torch
import torch.nn.functional as F
from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads
from .expanded_weights_utils import standard_kwargs, \
forward_helper, set_grad_sample_if_exists, unpack_expanded_weight_or_tensor
from typing import List, Optional
@implements_per_sample_grads(F.group_norm)
class GroupNormPerSampleGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs):
expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs)
input, num_groups = expanded_args
N = input.shape[0]
C = input.shape[1]
HxW = reduce(operator.mul, input.shape[2:], 1)
weight, bias, eps = expanded_kwargs['weight'], expanded_kwargs['bias'], expanded_kwargs['eps']
output, mean, rstd = forward_helper(torch.native_group_norm, (input, weight, bias, N, C, HxW, num_groups, eps), {})
ctx.input, ctx.num_groups = input, num_groups
ctx.weight, ctx.eps = weight, eps
ctx.mean, ctx.rstd = mean, rstd
if isinstance(bias, ExpandedWeight):
ctx.bias = bias
if input.requires_grad and isinstance(weight, ExpandedWeight):
ctx.weight = weight
return output
@staticmethod
def backward(ctx, grad_output):
input, num_groups = ctx.input, ctx.num_groups
weight, bias, eps = ctx.weight, ctx.bias, ctx.eps
mean, rstd = ctx.mean, ctx.rstd
results: List[Optional[torch.Tensor]] = []
results.append(None) # for kwarg names
results.append(None) # for op reference
if input.requires_grad:
weight_c = unpack_expanded_weight_or_tensor(weight, lambda t: t.contiguous())
input_c = input.contiguous()
grad_output_c = grad_output.contiguous() if grad_output is not None else None
N = input.shape[0]
C = input.shape[1]
HxW = 1
for s in input.shape[2:]:
HxW *= s
bw_fn = torch.ops.aten.native_group_norm_backward
results.append(bw_fn(grad_output_c, input_c,
mean, rstd, weight_c, N, C, HxW, num_groups, (True, False, False))[0])
else:
results.append(None)
# weight and bias don't compute batched gradients; no other arguments are differentiable
results = results + [None] * 4
# set grad_sample field for weight and bias with per sample gradients
if hasattr(ctx, "weight"):
set_grad_sample_if_exists(weight,
lambda _: torch.einsum("ni...->ni", F.group_norm(input, num_groups, eps=eps) * grad_output))
if hasattr(ctx, "bias"):
set_grad_sample_if_exists(bias, lambda _: torch.einsum("ni...->ni", grad_output))
return tuple(results)
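# Hypothetical illustration (not part of the original file): what "per sample
# gradients" means for F.group_norm's affine weight. A naive per-example autograd
# loop is compared against a single batched backward, which only yields the
# gradient summed over the batch.
_x = torch.randn(4, 6, 5, 5)
_w = torch.randn(6, requires_grad=True)
F.group_norm(_x, 3, _w).sum().backward()
_summed = _w.grad.clone()
_per_sample = []
for _i in range(4):
    _wi = _w.detach().clone().requires_grad_(True)
    F.group_norm(_x[_i:_i + 1], 3, _wi).sum().backward()
    _per_sample.append(_wi.grad)
# torch.stack(_per_sample).sum(0) should match _summed; the class above instead
# produces the per-sample gradients directly via the einsum formula in backward.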
|
pytorch-master
|
torch/nn/utils/_expanded_weights/group_norm_expanded_weights.py
|
import torch
import torch.nn.functional as F
from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads
from .expanded_weights_utils import forward_helper, set_grad_sample_if_exists, \
standard_kwargs, sum_over_all_but_batch_and_last_n, unpack_expanded_weight_or_tensor
from typing import List, Optional
@implements_per_sample_grads(F.layer_norm)
class LayerNormPerSampleGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs):
expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs)
input = expanded_args[0]
normalized_shape = expanded_args[1]
if len(input.shape) <= len(normalized_shape):
raise RuntimeError("Expanded Weights: Layer norm should not normalize over batch dimension for per sample gradient"
f"computations but got that normalized shape, {normalized_shape}, matched input shape.")
output, mean, rstd = forward_helper(torch.native_layer_norm, expanded_args, expanded_kwargs)
ctx.args = expanded_args
if input.requires_grad or isinstance(expanded_kwargs['weight'], ExpandedWeight):
ctx.weight = expanded_kwargs['weight']
if input.requires_grad or isinstance(expanded_kwargs['bias'], ExpandedWeight):
ctx.bias = expanded_kwargs['bias']
ctx.eps = expanded_kwargs['eps']
ctx.mean, ctx.rstd = mean, rstd
return output
@staticmethod
def backward(ctx, grad_output):
def weight_per_sample_grad(weight):
return sum_over_all_but_batch_and_last_n(F.layer_norm(input, normalized_shape, eps=ctx.eps) * grad_output, weight.dim())
input, normalized_shape = ctx.args
mean, rstd = ctx.mean, ctx.rstd
results: List[Optional[torch.Tensor]] = []
results.append(None) # for kwarg names
results.append(None) # for op reference
if input.requires_grad:
weight_ = unpack_expanded_weight_or_tensor(ctx.weight)
bias_ = unpack_expanded_weight_or_tensor(ctx.bias)
results.append(torch.ops.aten.native_layer_norm_backward(
grad_output, input, normalized_shape, mean, rstd, weight_, bias_, (True, False, False))[0])
else:
results.append(None)
# weight and bias don't compute batched gradients; no other arguments are differentiable
results = results + [None] * 4
# set grad_sample field for weight and bias with per sample gradients
if hasattr(ctx, "weight"):
set_grad_sample_if_exists(ctx.weight, weight_per_sample_grad)
if hasattr(ctx, "bias"):
set_grad_sample_if_exists(ctx.bias, lambda bias: sum_over_all_but_batch_and_last_n(grad_output, bias.dim()))
return tuple(results)
|
pytorch-master
|
torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py
|