import caffe2.python.fakelowp.init_shared_libs # noqa
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from hypothesis import given, strategies as st, settings
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
core.GlobalInit(["caffe2",
"--caffe2_log_level=-3",
"--glow_global_fp16=1",
"--glow_clip_quant_range_to_fp16=1",
"--glow_global_fp16_constants=1"
])
class Int8OpsTest(serial.SerializedTestCase):
def _get_scale_zp(self, tensor):
tensor_max = np.max(tensor)
tensor_min = min(0, np.min(tensor))
scale = np.float32(np.float16((tensor_max - tensor_min) / 255.0))
if scale < 1e-6:
scale = np.float32(1e-6)
zero_point = 0 - tensor_min / scale
zero_point = int(round(np.clip(zero_point, 0, 255.0)))
return (scale, zero_point)
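    # Worked example of the scale/zero-point math above (asymmetric uint8
    # quantization, hypothetical numbers): a tensor spanning [-0.5, 1.5] gives
    # scale = 2.0 / 255 ~= 0.00784 (rounded through fp16) and
    # zero_point = round(0.5 / scale) ~= 64, so that real ~= scale * (q - zero_point).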
@given(
n=st.integers(2, 1024),
rand_seed=st.integers(0, 65534),
non_zero_offset=st.booleans()
)
@settings(deadline=datetime.timedelta(seconds=50))
def test_int8_quantize(self, n, rand_seed, non_zero_offset):
print("n={}, rand_seed={}".format(n, rand_seed))
np.random.seed(rand_seed)
workspace.ResetWorkspace()
if non_zero_offset:
X_fp32 = np.random.uniform(-1, 1, size=(n, n)).astype(np.float16) \
.astype(np.float32)
else:
X_fp32 = np.random.rand(n, n).astype(np.float16).astype(np.float32)
W_fp32 = np.identity(n, dtype=np.float32)
b_fp32 = np.zeros((n,), dtype=np.float32)
X_scale, X_zero_point = self._get_scale_zp(X_fp32)
workspace.FeedBlob("X", X_fp32)
workspace.FeedBlob("W", W_fp32)
workspace.FeedBlob("b", b_fp32)
workspace.RunOperatorOnce(
core.CreateOperator(
"Int8FCPackWeight",
["W"],
["W_int8"],
engine="DNNLOWP",
save_unpacked_weights=True,
in_scale=X_scale,
)
)
ref_net = core.Net("net")
ref_net.Int8QuantizeNNPI(
["X"],
["X_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point
)
ref_net.Int8FCFakeAcc32NNPI(
["X_int8", "W_int8", "b"],
["Y_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point,
)
ref_net.Int8DequantizeNNPI(
["Y_int8"],
["Y"]
)
ref_net.Proto().external_output.append("Y")
# run ref_net
workspace.RunNetOnce(ref_net)
Y_fbgemm = workspace.FetchBlob("Y")
# run onnxifi net
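        # Rename the NNPI fake ops to their generic Int8 counterparts so that the
        # onnxifi pass below picks them up and lowers them to the Glow backend.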
ref_net.Proto().op[0].type = "Int8Quantize"
ref_net.Proto().op[1].type = "Int8FC"
ref_net.Proto().op[2].type = "Int8Dequantize"
net_onnxified = onnxifi_caffe2_net(
ref_net.Proto(),
{},
debug=True,
adjust_batch=False,
use_onnx=False,
weight_names=["W_int8", "b"],
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in net_onnxified.op
)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.CreateNet(net_onnxified)
workspace.RunNet(net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
if not np.allclose(Y_glow, Y_fbgemm):
diff_Y = np.abs(Y_glow - Y_fbgemm)
print_test_debug_info(
"int8_fc",
{
"seed": rand_seed,
"n": n,
"X": X_fp32,
"W": W_fp32,
"b": b_fp32,
"Y_fbgemm": Y_fbgemm,
"Y_glow": Y_glow,
"diff": diff_Y,
"maxdiff": diff_Y.max(axis=1),
},
)
assert 0
@given(
n=st.integers(1, 1024),
m=st.integers(1, 1024),
k=st.integers(1, 1024),
        f=st.integers(1, 1),  # TODO: figure out a safe upper bound to increase this to
rand_seed=st.integers(0, 65534),
quantize_bias=st.sampled_from([False]),
)
@settings(deadline=datetime.timedelta(seconds=50))
def test_int8_fc(
self, n, m, k, rand_seed, quantize_bias, f
):
print(
f"n={n}, m={m}, k={k}, rand_seed={rand_seed}, quantize_bias={quantize_bias}"
)
np.random.seed(rand_seed)
workspace.ResetWorkspace()
ff = float(f)
X_fp32 = np.random.uniform(-ff, ff, size=(m, k)).astype(np.float32)
W_fp32 = np.random.uniform(-ff, ff, size=(n, k)).astype(np.float32)
b_fp32 = np.random.uniform(-ff, ff, size=(n)).astype(np.float32)
X_scale, X_zero_point = self._get_scale_zp(X_fp32)
Y_fp32 = np.dot(X_fp32, W_fp32.T) + b_fp32
Y_scale, Y_zero_point = self._get_scale_zp(Y_fp32)
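        # The fp32 matmul above (Y = X @ W.T + b) is computed only to derive a
        # reasonable output scale and zero point for the quantized FC below.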
workspace.FeedBlob("X", X_fp32)
workspace.FeedBlob("W", W_fp32)
workspace.FeedBlob("b", b_fp32)
workspace.RunOperatorOnce(
core.CreateOperator(
"Int8FCPackWeight",
["W", "b"] if quantize_bias else ["W"],
["W_int8", "b_int32"] if quantize_bias else ["W_int8"],
engine="DNNLOWP",
save_unpacked_weights=True,
in_scale=X_scale,
)
)
ref_net = core.Net("net")
ref_net.Int8QuantizeNNPI(
["X"],
["X_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point
)
ref_net.Int8FCFakeAcc32NNPI(
["X_int8", "W_int8", "b_int32" if quantize_bias else "b"],
["Y_int8"],
Y_scale=Y_scale,
Y_zero_point=Y_zero_point,
)
ref_net.Int8DequantizeNNPI(
["Y_int8"],
["Y"]
)
ref_net.Proto().external_output.append("Y")
# run ref_net
workspace.RunNetOnce(ref_net)
Y_fbgemm = workspace.FetchBlob("Y")
# run onnxifi net
ref_net.Proto().op[0].type = "Int8Quantize"
ref_net.Proto().op[1].type = "Int8FC"
ref_net.Proto().op[2].type = "Int8Dequantize"
net_onnxified = onnxifi_caffe2_net(
ref_net.Proto(),
{},
debug=True,
adjust_batch=False,
use_onnx=False,
weight_names=["W_int8", "b_int32"] if quantize_bias else ["W_int8", "b"],
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in net_onnxified.op
)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.CreateNet(net_onnxified)
workspace.RunNet(net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
if not np.allclose(Y_glow, Y_fbgemm):
diff_Y = np.abs(Y_glow - Y_fbgemm)
print_test_debug_info(
"int8_fc",
{
"seed": rand_seed,
"n": n,
"m": m,
"k": k,
"X": X_fp32,
"W": W_fp32,
"b": b_fp32,
"Y_fbgemm": Y_fbgemm,
"Y_glow": Y_glow,
"diff": diff_Y,
"maxdiff": diff_Y.max(axis=1),
},
)
assert 0
@given(
n=st.integers(1, 4),
rand_seed=st.integers(0, 65534)
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_int8_small_input(self, n, rand_seed):
print("n={}, rand_seed={}".format(n, rand_seed))
np.random.seed(rand_seed)
workspace.ResetWorkspace()
X_fp32 = np.random.uniform(0.01, 0.03, size=(n, n)).astype(np.float32)
W_fp32 = np.identity(n, dtype=np.float32)
b_fp32 = np.zeros((n,), dtype=np.float32)
X_scale, X_zero_point = self._get_scale_zp(X_fp32)
workspace.FeedBlob("X", X_fp32)
workspace.FeedBlob("W", W_fp32)
workspace.FeedBlob("b", b_fp32)
workspace.RunOperatorOnce(
core.CreateOperator(
"Int8FCPackWeight",
["W"],
["W_int8"],
engine="DNNLOWP",
save_unpacked_weights=True,
in_scale=X_scale,
)
)
ref_net = core.Net("net")
ref_net.Int8QuantizeNNPI(
["X"],
["X_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point
)
ref_net.Int8FCFakeAcc32NNPI(
["X_int8", "W_int8", "b"],
["Y_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point,
)
ref_net.Int8DequantizeNNPI(
["Y_int8"],
["Y"]
)
ref_net.Proto().external_output.append("Y")
# run ref_net
workspace.RunNetOnce(ref_net)
Y_fbgemm = workspace.FetchBlob("Y")
# run onnxifi net
ref_net.Proto().op[0].type = "Int8Quantize"
ref_net.Proto().op[1].type = "Int8FC"
ref_net.Proto().op[2].type = "Int8Dequantize"
net_onnxified = onnxifi_caffe2_net(
ref_net.Proto(),
{},
debug=True,
adjust_batch=False,
use_onnx=False,
weight_names=["W_int8", "b"],
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in net_onnxified.op
)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.CreateNet(net_onnxified)
workspace.RunNet(net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
if not np.allclose(Y_glow, Y_fbgemm):
diff_Y = np.abs(Y_glow - Y_fbgemm)
print_test_debug_info(
"int8_fc",
{
"seed": rand_seed,
"n": n,
"X": X_fp32,
"W": W_fp32,
"b": b_fp32,
"Y_fbgemm": Y_fbgemm,
"Y_glow": Y_glow,
"diff": diff_Y,
"maxdiff": diff_Y.max(axis=1),
},
)
assert 0

# --- end of caffe2/contrib/fakelowp/test/test_int8_ops_nnpi.py (pytorch-master) ---

# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from hypothesis import given, settings, example
from hypothesis import strategies as st
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
# Test that parallel chunks behave the same way as the serial one
workspace.GlobalInit(
[
"caffe2",
"--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
"--glow_global_force_sls_fp16_accum=1",
"--glow_nnpi_num_parallel_chunks=2",
"--glow_use_dag_optimizer=false",
"--glow_dump_graph=true",
]
)
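
# The --glow_nnpi_num_parallel_chunks=2 flag above presumably asks the backend to
# split the lowered net into two parallel partitions; the test below then checks
# that the partitioned result still matches the serial fbgemm reference.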
class Fusions(serial.SerializedTestCase):
def _get_scale_zp(self, tensor):
tensor_max = np.max(tensor)
tensor_min = min(0, np.min(tensor))
scale = np.float32(np.float16((tensor_max - tensor_min) / 255.0))
if scale < 1e-6:
scale = np.float32(1e-6)
zero_point = 0 - tensor_min / scale
zero_point = int(round(np.clip(zero_point, 0, 255.0)))
return (scale, zero_point)
@given(
scale=st.floats(1e-4, 1e2),
zp=st.integers(-128, 128),
rand_seed=st.integers(0, 65534),
m=st.integers(32, 64),
k=st.integers(1000, 6000),
n=st.integers(200, 600),
)
# @example(m=64, k=5423, n=553, scale=1e-3, zp=120, rand_seed=1)
@settings(deadline=datetime.timedelta(seconds=1000), max_examples=1)
def test_ParallelFC(self, m, k, n, scale, zp, rand_seed):
np.random.seed(rand_seed)
workspace.ResetWorkspace()
# Y = W_T * X + b
X_fp32 = np.random.uniform(-1, 1, size=(m, k)).astype(np.float16) \
.astype(np.float32)
W_fp32 = np.random.uniform(-1, 1, size=(n, k)).astype(np.float32)
b_fp32 = np.zeros((n,), dtype=np.float32)
X_scale, X_zero_point = self._get_scale_zp(X_fp32)
workspace.FeedBlob("X", X_fp32)
workspace.FeedBlob("W", W_fp32)
workspace.FeedBlob("b", b_fp32)
workspace.RunOperatorOnce(
core.CreateOperator(
"Int8FCPackWeight",
["W"],
["W_int8"],
engine="DNNLOWP",
save_unpacked_weights=True,
in_scale=X_scale,
)
)
ref_net = core.Net("net")
ref_net.Int8QuantizeNNPI(
["X"],
["X_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point
)
ref_net.Int8FCFakeAcc32NNPI(
["X_int8", "W_int8", "b"],
["Y_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point,
)
ref_net.Int8Relu(
["Y_int8"],
["Y_relu"],
Y_zero_point=X_zero_point,
Y_scale=X_scale,
)
ref_net.Int8DequantizeNNPI(
["Y_relu"],
["Y"]
)
ref_net.Proto().external_output.append("Y")
# run ref_net
workspace.RunNetOnce(ref_net)
Y_fbgemm = workspace.FetchBlob("Y")
# run onnxifi net
ref_net.Proto().op[0].type = "Int8Quantize"
ref_net.Proto().op[1].type = "Int8FC"
ref_net.Proto().op[2].type = "Int8Relu"
ref_net.Proto().op[3].type = "Int8Dequantize"
net_onnxified = onnxifi_caffe2_net(
ref_net.Proto(),
{},
debug=True,
adjust_batch=False,
use_onnx=False,
weight_names=["W_int8", "b"],
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in net_onnxified.op
)
print(net_onnxified)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.CreateNet(net_onnxified)
workspace.RunNet(net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
if not np.allclose(Y_glow, Y_fbgemm):
diff_Y = np.abs(Y_glow - Y_fbgemm)
print_test_debug_info(
"int8_fc",
{
"seed": rand_seed,
"n": n,
"X": X_fp32,
"W": W_fp32,
"b": b_fp32,
"Y_fbgemm": Y_fbgemm,
"Y_glow": Y_glow,
"diff": diff_Y,
"maxdiff": diff_Y.max(axis=1),
},
)
assert 0

# --- end of caffe2/contrib/fakelowp/test/test_chunking.py (pytorch-master) ---

import numpy as np
import caffe2.python.fakelowp.init_shared_libs # noqa
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
from hypothesis import settings
core.GlobalInit(["caffe2", "--caffe2_log_level=-3", "--glow_global_fp16=1"])
class DeqSwishQuantTest(serial.SerializedTestCase):
def _get_scale_zp(self, tensor):
tensor_max = np.max(tensor)
tensor_min = min(0, np.min(tensor))
scale = np.float32(np.float16((tensor_max - tensor_min) / 255.))
zero_point = -tensor_min / scale
zero_point = int(round(np.clip(zero_point, 0, 255.0)))
return (scale, zero_point)
def _sigmoid(self, x):
return 1. / (1. + np.exp(np.float32(-x)))
def _swish(self, x):
return np.float32(x) * self._sigmoid(x)
@settings(deadline=datetime.timedelta(seconds=10))
def test_swish_int8(self):
np.random.seed(0)
workspace.ResetWorkspace()
n = 256
X_fp32 = np.linspace(-20.5, 8., num=n).astype(np.float32).reshape(1, n)
Y_fp32 = self._swish(X_fp32)
X_scale, X_zero_point = self._get_scale_zp(X_fp32)
Y_scale, Y_zero_point = self._get_scale_zp(Y_fp32)
W_fp32 = np.identity(n, dtype=np.float32)
b_fp32 = np.zeros((n,), dtype=np.float32)
workspace.FeedBlob("X", X_fp32)
workspace.FeedBlob("W", W_fp32)
workspace.FeedBlob("b", b_fp32)
workspace.RunOperatorOnce(
core.CreateOperator(
"Int8FCPackWeight",
["W"],
["W_int8"],
engine="DNNLOWP",
save_unpacked_weights=True,
in_scale=X_scale,
)
)
ref_net1 = core.Net("net")
ref_net1.Int8QuantizeNNPI(
["X"],
["X_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point
)
ref_net1.Int8FCFakeAcc32NNPI(
["X_int8", "W_int8", "b"],
["U_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point,
)
ref_net1.SwishFakeInt8NNPI(
["U_int8"],
["Y"],
X_scale=X_scale,
X_zero_point=X_zero_point,
Y_scale=Y_scale,
Y_zero_point=Y_zero_point
)
ref_net1.Proto().external_output.append("Y")
ref_net = core.Net("net")
ref_net.Int8QuantizeNNPI(
["X"],
["X_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point
)
ref_net.Int8FCFakeAcc32NNPI(
["X_int8", "W_int8", "b"],
["U_int8"],
Y_scale=X_scale,
Y_zero_point=X_zero_point,
)
ref_net.Int8DequantizeNNPI(
["U_int8"],
["U_fp16"],
UsingOneOverScale=False
)
ref_net.SwishFakeFp16NNPI(
["U_fp16"],
["Y_fp16"]
)
ref_net.Int8QuantizeNNPI(
["Y_fp16"],
["Y"],
Y_scale=Y_scale,
Y_zero_point=Y_zero_point
)
ref_net.Proto().external_output.append("Y")
# run ref_net
workspace.RunNetOnce(ref_net1)
Y_fbgemm = workspace.FetchInt8Blob("Y")
# run onnxifi net
ref_net.Proto().op[0].type = "Int8Quantize"
ref_net.Proto().op[1].type = "Int8FC"
ref_net.Proto().op[2].type = "Int8Dequantize"
ref_net.Proto().op[3].type = "Swish"
ref_net.Proto().op[4].type = "Int8Quantize"
net_onnxified = onnxifi_caffe2_net(
ref_net.Proto(),
{},
debug=True,
adjust_batch=False,
use_onnx=False,
weight_names=["W_int8", "b"],
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in net_onnxified.op
)
np.testing.assert_equal(num_onnxified_ops, 1)
# TODO: add an assertion to check the optimized net
# fused Dequantize->Swish->Quantize to QuantizedSwish
workspace.CreateNet(net_onnxified)
workspace.RunNet(net_onnxified.name)
Y_glow = workspace.FetchInt8Blob("Y")
U_int8 = workspace.FetchInt8Blob("U_int8")
diff_Y = np.abs(Y_glow.data - Y_fbgemm.data)
num_mismatches = np.count_nonzero(diff_Y)
max_diff = np.max(diff_Y)
if max_diff > 0 or Y_glow.scale != Y_fbgemm.scale or \
Y_glow.zero_point != Y_fbgemm.zero_point:
print_test_debug_info(
"QuantizedSwish",
{
"X": X_fp32,
"X_scale": X_scale,
"X_zero_point": X_zero_point,
"Y_scale": Y_scale,
"Y_zero_point": Y_zero_point,
"U_int8": U_int8,
"Y_fbgemm": Y_fbgemm,
"Y_glow": Y_glow,
"diff": diff_Y,
"max_diff": max_diff,
"num_mismatches": num_mismatches,
},
)
assert 0

# --- end of caffe2/contrib/fakelowp/test/test_deq_swish_quant_nnpi.py (pytorch-master) ---

import numpy as np
import unittest
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import datetime
import caffe2.python.serialized_test.serialized_test_util as serial
core.GlobalInit(["caffe2", "--caffe2_log_level=-3", "--glow_global_fp16=1"])
GLOW_MATMUL_RTOL = 0
class FCTest(serial.SerializedTestCase):
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_clip(self, seed):
np.random.seed(seed)
m, n, k = 8, 8, 8
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "W0", "b0", "W1", "b1"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X", "W0", "b0"],
["X1"],
)
)
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X1", "W1", "b1"],
["Y"],
)
)
workspace.GlobalInit(
['caffe2', '--caffe2_log_level=0', '--glow_global_fp16=1',
'--glow_clip_fp16', '--glow_global_fp16_constants=1'])
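        # With fp16 clipping enabled, the 65536.0 weights below overflow fp16;
        # Glow is expected to saturate the result at the fp16 maximum (65504).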
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = np.full((n, k), 65536.0, dtype)
b0 = np.random.randint(low=1, high=3, size=(n)).astype(dtype)
W1 = np.random.randint(low=1, high=3, size=(n, k)).astype(dtype)
b1 = np.random.randint(low=1, high=3, size=(n)).astype(dtype)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
workspace.FeedBlob("W1", W1)
workspace.FeedBlob("b1", b1)
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False
)
X = np.random.randint(low=1, high=3, size=(m, k)).astype(dtype)
workspace.FeedBlob("X", X)
workspace.CreateNet(pred_net_onnxified)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
np.testing.assert_allclose(Y_glow, np.full((m, n), 65504.0, dtype))
@given(
m=st.integers(4, 50),
k=st.integers(4, 50),
n=st.integers(4, 50),
seed=st.integers(0, 65534)
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_fc_exercise(self, m, k, n, seed):
""" Test that the matmul engine is working, this doesn't test
precision
"""
np.random.seed(seed)
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "W0", "b0"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X", "W0", "b0"],
["Y"],
)
)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = np.random.randint(low=1, high=3, size=(n, k)).astype(dtype)
b0 = np.random.randint(low=1, high=3, size=(n)).astype(dtype)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
X0 = np.random.randint(low=1, high=3, size=(m, k)).astype(dtype)
workspace.FeedBlob("X", X0)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net)
num_iterations = 2
for _ in range(num_iterations):
X0 = np.random.randint(low=1, high=3, size=(m, k)).astype(dtype)
workspace.FeedBlob("X", X0)
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
# Run caffe2 net
workspace.RunNet(pred_net.name)
Y_c2 = workspace.FetchBlob('Y')
if not np.allclose(Y_c2, Y_glow):
print_test_debug_info("fc", {
"seed": seed,
"m": m,
"k": k,
"n": n,
"X": X0,
"W0": W0,
"b0": b0,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff": np.abs((Y_c2 - Y_glow) / Y_c2)})
assert(0)
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_fc_numeric_cases(self, seed):
""" Test numerics, use examples found from the unit test.
Use Fp16FCAcc16NNPI as a reference.
"""
np.random.seed(seed)
m = 1
k = 20
n = 1
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "W0", "b0"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FC",
["X", "W0", "b0"],
["Y"],
)
)
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred"
pred_net_ref.external_input.extend(["X", "W0", "b0"])
pred_net_ref.external_output.append("Y")
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
"Fp16FCAcc32NNPI",
["X", "W0", "b0"],
["Y"],
)
)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = np.array([[0.04882812, 0.21520996, 0.1027832, 0.04489136,
-0.07635498, 0.14587402,
-0.06240845, 0.3918457, 0.46362305, -0.11657715,
0.29174805, 0.02890015,
0.0680542, 0.4255371, -0.42895508, -0.4128418,
-0.47973633, 0.33251953,
0.27807617, 0.3701172]], dtype=np.float32)
b0 = np.array([0.47851562], dtype=np.float32)
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
X_inputs = [
np.array([[
-2.94921875e-01, -3.58642578e-01, -1.92871094e-01,
2.81250000e-01, -1.30126953e-01, 2.32696533e-02,
-4.55566406e-01, -2.31811523e-01, -1.95190430e-01,
-7.76977539e-02, -1.29394531e-01, 2.94677734e-01,
8.96453857e-04, 4.97314453e-01, -6.07604980e-02,
2.55371094e-01, 3.49853516e-01, -1.37695312e-01,
2.95410156e-01, -3.67187500e-01]], dtype=np.float32),
np.array([[
-0.4494629, -0.22192383, -0.1640625, 0.11480713,
-0.09851074, -0.02084351,
0.19091797, -0.17468262, -0.47485352, 0.07489014,
0.03897095, 0.00197601,
0.02835083, -0.27294922, 0.26757812, -0.20996094,
-0.31103516, -0.41601562,
0.09918213, -0.07696533]], dtype=np.float32),
np.array([[
0.01150513, -0.20507812, 0.46704102, 0.00906372,
0.19848633, 0.3720703,
0.46557617, -0.47436523, -0.35107422, -0.0362854,
-0.20812988, 0.41918945,
0.09716797, 0.19897461, 0.3876953, -0.0165863,
0.23535156, 0.29956055,
0.24389648, -0.23486328]], dtype=np.float32)
]
# keep onnxifi happy by feeding something with a shape
workspace.FeedBlob("X", X_inputs[0])
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net_ref)
for i in range(len(X_inputs)):
workspace.FeedBlob("X", X_inputs[i])
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob('Y')
diff = np.abs((Y_c2 - Y_glow) / (Y_c2 + 1e-8))
rowdiff = np.max(diff, axis=1)
n_offenders = np.count_nonzero(rowdiff[rowdiff > GLOW_MATMUL_RTOL])
if n_offenders > 0:
print_test_debug_info("fc", {
"seed": seed,
"iter": i,
"m": m,
"k": k,
"n": n,
"W0": W0,
"b0": b0,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff": diff,
"rowdiff": rowdiff})
assert(0)
@given(
m=st.integers(1, 50),
k=st.integers(1, 1000),
n=st.integers(1, 50),
seed=st.integers(0, 65534),
use_packed=st.integers(0, 2)
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_fc_num0(self, seed, m, k, n, use_packed):
""" Test numerics, fix a dimension and determine the ranges of error.
Use Fp16FCAcc16 as a reference.
"""
W = "W_packed" if use_packed else "W0"
dtype = np.float32
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", W, "b0"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"FbFCPacked" if use_packed else "FC",
["X", W, "b0"],
["Y"],
)
)
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred"
pred_net_ref.external_input.extend(["X", W, "b0"])
pred_net_ref.external_output.append("Y")
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
"Fp16FCAcc32NNPI",
["X", W, "b0"],
["Y"],
)
)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.ResetWorkspace()
W0 = 10 * (np.random.rand(n, k) - 0.5).astype(np.float16).astype(np.float32)
b0 = 1 * (np.random.rand(n) - 0.5).astype(np.float16).astype(np.float32)
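        # The fp16 round trip above keeps the random weights exactly representable
        # in fp16, so the Glow path and the Fp16FCAcc32NNPI reference should see
        # identical values (inferred from the cast chain).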
workspace.FeedBlob("W0", W0)
workspace.FeedBlob("b0", b0)
workspace.RunOperatorOnce(
core.CreateOperator(
"FbGemmPack",
['W0'],
['W_packed'],
no_packing=True,
)
)
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
{"X": (m, k)},
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
X0 = np.random.rand(m, k).astype(dtype) - 0.5
workspace.FeedBlob("X", X0)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net_ref)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
# Run caffe2 net
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob('Y')
diff = np.abs((Y_c2 - Y_glow) / (Y_c2 + 1e-8))
rowdiff = np.max(diff, axis=1)
n_offenders = np.count_nonzero(rowdiff[rowdiff > GLOW_MATMUL_RTOL])
if n_offenders > 0:
print_test_debug_info("fc", {
"seed": seed,
"use_packed": use_packed,
"m": m,
"k": k,
"n": n,
"X": X0.shape,
"W0": W0.shape,
"b0": b0.shape,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff": diff,
"rowdiff": rowdiff})
assert(0)
if __name__ == '__main__':
unittest.main()

# --- end of caffe2/contrib/fakelowp/test/test_fc_nnpi_fp16.py (pytorch-master) ---

import unittest
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
workspace.GlobalInit(
[
"caffe2",
"--glow_global_fp16=0",
"--glow_global_fused_scale_offset_fp16=0",
"--glow_global_force_sls_fp16_accum=0",
]
)
GLOW_MATMUL_ATOL = 1e-5
GLOW_MATMUL_RTOL = 1e-3
class SparseLengthsSum8BitFakeNNPIFp32Test(serial.SerializedTestCase):
@given(
seed=st.integers(0, 65535),
num_rows=st.integers(2, 20),
embedding_dim=st.sampled_from([8, 12, 16, 24, 32, 54, 64, 128]),
batch_size=st.integers(1, 5),
max_weight=st.integers(0, 100),
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_slws_fused_8bit_rowwise_acc32_nnpi(
self, seed, num_rows, embedding_dim, batch_size, max_weight
):
workspace.GlobalInit(
[
"caffe2",
"--glow_global_fp16=0",
"--glow_global_fused_scale_offset_fp16=0",
"--glow_global_force_sls_fp16_accum=0",
]
)
workspace.ResetWorkspace()
np.random.seed(seed)
data = np.random.rand(num_rows, embedding_dim).astype(np.float32)
lengths = np.random.choice(np.arange(1, num_rows), batch_size).astype(np.int32)
_indices = []
for length in lengths:
_indices.extend(np.random.choice(np.arange(1, num_rows), length))
indices = np.asarray(_indices).astype(np.int64)
weights = np.random.uniform(
low=0,
high=max_weight,
size=[len(indices)]
).astype(np.float32)
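        # SparseLengthsWeightedSum semantics: for each output row b, sum
        # weights[i] * row(indices[i]) over the lengths[b] entries of segment b,
        # here with row-wise 8-bit quantized data.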
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwise",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
ref_net.external_output.append("Y")
ref_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwiseFakeFP32NNPI",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
workspace.FeedBlob("data", data)
workspace.RunOperatorOnce(
core.CreateOperator(
"FloatToFused8BitRowwiseQuantized",
["data"],
["quantized_data"]
)
)
onnxified_net = onnxifi_caffe2_net(
pred_net,
{},
max_batch_size=batch_size,
max_seq_size=np.max(lengths),
debug=True,
adjust_batch=True,
use_onnx=False,
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in onnxified_net.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("lengths", lengths)
workspace.FeedBlob("weights", weights)
workspace.CreateNet(onnxified_net)
workspace.CreateNet(ref_net)
workspace.RunNet(onnxified_net.name)
Y_glow = workspace.FetchBlob("Y")
workspace.RunNet(ref_net.name)
Y_ref = workspace.FetchBlob("Y")
diff = np.abs((Y_ref - Y_glow) / (Y_ref + 1e-8))
max_err = np.max(diff, axis=1)
num_offenders = (max_err > 0).sum()
if num_offenders > 0:
print_test_debug_info(
"test_slws_fused_8bit_rowwise_acc32_nnpi",
{
"seed": seed,
"num_rows": num_rows,
"embedding_dim": embedding_dim,
"batch_size": batch_size,
"indices": indices,
"data": data.shape,
"lengths": lengths,
"weights": weights,
"Y_glow": Y_glow,
"Y_ref": Y_ref,
"diff": diff,
"rowwise_diff": np.max(diff, axis=1),
},
)
assert 0
@given(seed=st.integers(0, 65535))
@settings(deadline=datetime.timedelta(seconds=10))
def test_small_sls_acc32(self, seed):
workspace.GlobalInit(
[
"caffe2",
"--glow_global_fp16=0",
"--glow_global_fused_scale_offset_fp16=0",
"--glow_global_force_sls_fp16_accum=0",
]
)
np.random.seed(seed)
workspace.ResetWorkspace()
n = 2
DIM = 3
data = 4 * (np.random.random_sample((n, DIM)) + 1).astype(np.float32)
lengths = np.array([n], dtype=np.int32)
indices = np.array(range(n), dtype=np.int64)
weights = np.random.uniform(low=0.01, high=0.5, size=[n]).astype(np.float32)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwise",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
ref_net.external_output.append("Y")
ref_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwiseFakeFP32NNPI",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
workspace.FeedBlob("data", data)
workspace.RunOperatorOnce(
core.CreateOperator(
"FloatToFused8BitRowwiseQuantized", ["data"], ["quantized_data"]
)
)
quantized_data = workspace.FetchBlob("quantized_data")
onnxified_net = onnxifi_caffe2_net(
pred_net,
{},
max_batch_size=1,
max_seq_size=n,
debug=True,
adjust_batch=True,
use_onnx=False,
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in onnxified_net.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("lengths", lengths)
workspace.FeedBlob("weights", weights)
workspace.CreateNet(onnxified_net)
workspace.CreateNet(ref_net)
workspace.RunNet(onnxified_net.name)
Y_glow = workspace.FetchBlob("Y")
workspace.RunNet(ref_net.name)
Y_ref = workspace.FetchBlob("Y")
diff = np.abs((Y_ref - Y_glow) / (Y_ref + 1e-8))
max_err = np.max(diff, axis=1)
num_offenders = (max_err > 0).sum()
if num_offenders > 0:
np.set_printoptions(precision=12)
print(
"ref",
Y_ref.astype(np.float16).astype(np.float32),
"glow",
Y_glow.astype(np.float16).astype(np.float32),
)
print_test_debug_info(
"test_small_sls_acc32",
{
"seed": seed,
"indices": indices,
"data": data,
"quantized_data": quantized_data,
"lengths": lengths,
"weights": weights,
"Y_glow": Y_glow,
"Y_ref": Y_ref,
"diff": diff,
"rowwise_diff": np.max(diff, axis=1),
},
)
assert 0
if __name__ == '__main__':
unittest.main()

# --- end of caffe2/contrib/fakelowp/test/test_sls_8bit_nnpi_fp32.py (pytorch-master) ---

import numpy as np
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from caffe2.python.fakelowp.test_utils import compute_ulp_error
import caffe2.python.serialized_test.serialized_test_util as serial
core.GlobalInit(["caffe2", "--caffe2_log_level=-3", "--glow_global_fp16=1"])
kEpsilon = 1e-8
class ArithmeticOpsTest(serial.SerializedTestCase):
def _test_binary_op_graph(self, name, seed):
np.random.seed(seed)
workspace.ResetWorkspace()
# First dimension is the batch size
dims = np.concatenate((np.array([1]), np.random.randint(1, 20, size=3)))
A = np.random.uniform(low=-100.0, high=100.0, size=dims).astype(np.float32)
B = np.random.uniform(low=-100.0, high=100.0, size=dims).astype(np.float32)
# Avoid dividing by 0
B[np.abs(B) < 1e-3] = 1e-3
print(A.shape, B.shape)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["A", "B"])
pred_net.external_output.append("C")
pred_net.op.add().CopyFrom(
core.CreateOperator(
name,
["A", "B"],
["C"]
)
)
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "ref"
pred_net_ref.external_input.extend(["A", "B"])
pred_net_ref.external_output.append("C_ref")
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
name + "FakeFp16",
["A", "B"],
["C_ref"],
)
)
shape_hints = {"A": A.shape, "B": B.shape}
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
shape_hints,
debug=True,
adjust_batch=True,
use_onnx=False)
print(pred_net_onnxified)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.FeedBlob("A", A)
workspace.FeedBlob("B", B)
workspace.CreateNet(pred_net_ref)
workspace.CreateNet(pred_net_onnxified)
num_iterations = 10
for _ in range(num_iterations):
A = np.random.uniform(low=-100.0, high=100.0, size=dims).astype(np.float32)
B = np.random.uniform(low=-100.0, high=100.0, size=dims).astype(np.float32)
# Avoid dividing by 0
B[np.abs(B) < 1e-3] = 1e-3
workspace.FeedBlob("A", A)
workspace.FeedBlob("B", B)
# Run caffe2 net
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob("C_ref")
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("C")
Y_glow[Y_glow == np.Inf] = np.finfo(np.float16).max
Y_glow[Y_glow == np.NINF] = np.finfo(np.float16).min
# Ignore mismatches solely due to difference in precision
fp16_finite = np.isfinite(A.astype(np.float16) / B.astype(np.float16))
# Results should be identical since we are comparing with the C2 emulation
if not np.allclose(Y_c2[fp16_finite], Y_glow[fp16_finite]):
diff = np.abs((Y_glow - Y_c2) / (Y_c2 + kEpsilon))
print_test_debug_info(name, {
"dims": dims, "iter": _, "seed": seed, "A": A, "B": B,
"Y_glow": Y_glow, "Y_c2": Y_c2, "diff": diff})
assert(0)
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_add_graph(self, seed):
self._test_binary_op_graph("Add", seed)
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_sub_graph(self, seed):
self._test_binary_op_graph("Sub", seed)
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_mul_graph(self, seed):
self._test_binary_op_graph("Mul", seed)
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_div_graph(self, seed):
self._test_binary_op_graph("Div", seed)
class UnaryOpTest(serial.SerializedTestCase):
def _test_unary_op(self, opname, X, rtol=1e-5, atol=1e-8):
workspace.ResetWorkspace()
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.append("X")
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
opname,
['X'],
['Y'])
)
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.append("X")
ref_net.external_output.append("Y")
ref_net.op.add().CopyFrom(
core.CreateOperator(
opname + 'FakeFp16NNPI',
['X'],
['Y'])
)
print("REF NET = {}".format(ref_net))
shape_hints = {"X": X.shape}
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
shape_hints,
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.FeedBlob("X", X)
workspace.CreateNet(ref_net)
workspace.CreateNet(pred_net_onnxified)
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
# Run caffe2 reference net
workspace.RunNet(ref_net.name)
Y_c2 = workspace.FetchBlob('Y')
        if not np.allclose(Y_c2, Y_glow, rtol=rtol, atol=atol):
diff = np.abs(Y_c2 - Y_glow)
np.save('/tmp/' + opname + 'diff', diff)
np.save('/tmp/' + opname + 'result', Y_c2)
print_test_debug_info(opname, {
"X": X,
"Y_c2": Y_c2,
"Y_glow": Y_glow,
"diff": diff
})
assert(0)
return Y_glow
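    # ULP (units in the last place) error measures how far the computed value is
    # from the reference in representable fp16 steps at that magnitude;
    # compute_ulp_error from the fakelowp test utils is assumed to return this
    # per-element error for the named op.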
def _test_op_w_ulp_error(self, seed, opname, regions, atol=0, err_threshold=2):
ulp_err = 0
for x0, x1 in regions:
X = np.linspace(x0, x1, num=1025, dtype=np.float16).astype(np.float32)
Y_glow = self._test_unary_op(opname, X, atol=atol)
region_err = compute_ulp_error(opname, X, Y_glow)
ulp_err = max(np.max(np.abs(region_err)), ulp_err)
if (ulp_err > err_threshold):
print(r'{} Op detected ulp_err={}'.format(opname, ulp_err))
assert(0)
    # This test doesn't need to run multiple times given that it is a
    # deterministic linear sweep.
    # Once the hypothesis version is updated, we can re-enable
    # testing with different hypothesis examples.
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=20))
def test_sigmoid(self, seed):
np.random.seed(seed)
opname = "Sigmoid"
regions = [[-8., -4.], [-4., -2.], [-2., -1.], [-1., -.5], [-.5, -.25],
[-.25, .25], [.25, .5], [.5, 1.], [1., 2.], [2., 4.],
[4., 8.]]
self._test_op_w_ulp_error(seed, opname, regions, atol=0, err_threshold=2.5)
    # This test doesn't need to run multiple times given that it is a
    # deterministic linear sweep.
    # Once the hypothesis version is updated, we can re-enable
    # testing with different hypothesis examples.
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=20))
def test_tanh(self, seed):
np.random.seed(seed)
opname = "Tanh"
regions = [[2.**(-9), 2.**(-8)], [2.**(-8), 2.**(-7)],
[2.**(-7), 2.**(-6)], [2.**(-6), 2.**(-5)],
[2.**(-5), 2.**(-4)], [2.**(-4), 2.**(-3)],
[2.**(-3), 2.**(-2)], [2.**(-2), 2.**(-1)],
[2.**(-1), 1.], [1., 2.], [2., 4.], [4., 8.]]
self._test_op_w_ulp_error(seed, opname, regions, atol=0, err_threshold=2)
    # This test doesn't need to run multiple times given that it is a
    # deterministic linear sweep.
    # Once the hypothesis version is updated, we can re-enable
    # testing with different hypothesis examples.
# TODO: move atol to 1e-8 once we get a non-lowered swish implementation
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_swish(self, seed):
np.random.seed(seed)
opname = "Swish"
regions = [[-20.5, -11.], [-11., -8.], [-8., -1.], [-1., -0.1],
[-1. / 8., 1. / 8.], [1. / 8, 5.], [5., 8.]]
self._test_op_w_ulp_error(seed, opname, regions, atol=0.008, err_threshold=384)
    # This test doesn't need to run multiple times given that it is a
    # deterministic linear sweep.
    # Once the hypothesis version is updated, we can re-enable
    # testing with different hypothesis examples.
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def test_logit(self, seed):
np.random.seed(seed)
workspace.ResetWorkspace()
n = 1
m = 15361
X = np.linspace(0, 1, num=m, dtype=np.float32)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.append("X")
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
'Logit',
['X'],
['Y'],
eps=1e-6)
)
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.append("X")
ref_net.external_output.append("Y")
ref_net.op.add().CopyFrom(
core.CreateOperator(
'LogitFakeFp16NNPI',
['X'],
['Y'],
eps=1e-6)
)
print("REF NET = {}".format(ref_net))
shape_hints = {"X": (n, m)}
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
shape_hints,
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.FeedBlob("X", X)
workspace.CreateNet(ref_net)
workspace.CreateNet(pred_net_onnxified)
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
# Run caffe2 reference net
workspace.RunNet(ref_net.name)
Y_c2 = workspace.FetchBlob('Y')
diff = np.abs(Y_c2 - Y_glow)
if np.nanmax(diff) > 9e-3:
np.save('/tmp/logit_diff', diff)
np.save('/tmp/logit_result', Y_c2)
print_test_debug_info('Logit', {
"X": X,
"Y_c2": Y_c2,
"Y_glow": Y_glow,
"diff": diff
})
assert(0)
class ReluTest(serial.SerializedTestCase):
@given(seed=st.integers(0, 65534))
@settings(deadline=datetime.timedelta(seconds=10))
def relu_test(self, inputs, gc, dc, seed):
np.random.seed(seed)
inputs = np.random.rand(1).astype(np.float32)
X = inputs[0]
# First dimension is the batch size
print(X.shape)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"Relu",
["X"],
["Y"]
)
)
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "ref"
pred_net_ref.external_input.extend(["X"])
pred_net_ref.external_output.append("Y_ref")
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
"ReluFakeFp16",
["X"],
["Y_ref"],
)
)
shape_hints = {"X": X.shape}
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
shape_hints,
debug=True,
adjust_batch=True,
use_onnx=False)
print(pred_net_onnxified)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.SwitchWorkspace("glow_test_ws", True)
workspace.FeedBlob("X", X)
workspace.CreateNet(pred_net_ref)
workspace.CreateNet(pred_net_onnxified)
workspace.FeedBlob("X", X)
# Run caffe2 net
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob("Y_ref")
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
# Results should be identical since we are comparing with the C2 emulation
if not np.allclose(Y_c2, Y_glow):
diff = np.abs((Y_glow - Y_c2) / (Y_c2 + kEpsilon))
print_test_debug_info("Relu", {
"seed": seed, "X": X,
"Y_glow": Y_glow, "Y_c2": Y_c2, "diff": diff})
assert(0)

# --- end of caffe2/contrib/fakelowp/test/test_op_nnpi_fp16.py (pytorch-master) ---

import unittest
from typing import Dict, Any
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
workspace.GlobalInit(
[
"caffe2",
"--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
"--glow_global_force_sls_fp16_accum=1",
]
)
GLOW_MATMUL_ATOL = 1e-5
GLOW_MATMUL_RTOL = 1e-3
class SparseLengthsSum8BitFakeNNPIFp16Test(serial.SerializedTestCase):
def Skip_test_SLS_NonQuantized_fp16(self):
N = 20000
DIM = 64
D = (4 * np.random.random_sample((N, DIM)) + 1).astype(np.float32)
I = (np.random.randint(0, N, size=12)).astype(np.int64)
L = np.asarray([4, 4, 4]).astype(np.int32)
workspace.FeedBlob("D", D)
ref_c2_net = core.Net("test_ref_c2")
ref_c2_net.SparseLengthsSum(["D", "I", "L"], "ref_out")
ref_c2_net.Proto().external_input.extend(["D", "I", "L"])
ref_c2_net.Proto().external_output.extend(["ref_out"])
fp16_c2_net = core.Net("test_fp16_c2")
fp16_c2_net.SparseLengthsSumFakeFP16AccFP16(["D", "I", "L"], "fp16_out")
input_dict : Dict[Any, Any] = {}
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["D", "I", "L"])
pred_net.external_output.append("glow_out")
pred_net.op.add().CopyFrom(
core.CreateOperator("SparseLengthsSum", ["D", "I", "L"], ["glow_out"])
)
onnxified_net = onnxifi_caffe2_net(
pred_net,
input_dict,
max_batch_size=3,
max_seq_size=16,
debug=True,
adjust_batch=False,
use_onnx=False,
)
num_onnxified_ops = sum(
1 if op.type == "Onnxifi" else 0 for op in onnxified_net.op
)
print(onnxified_net)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("I", I)
workspace.FeedBlob("L", L)
workspace.RunNetOnce(ref_c2_net)
ref_c2_out = workspace.FetchBlob("ref_out")
workspace.RunNetOnce(fp16_c2_net)
fp16_c2_out = workspace.FetchBlob("fp16_out")
np.testing.assert_allclose(fp16_c2_out, ref_c2_out, atol=1e-3, rtol=1e-3)
workspace.RunNetOnce(onnxified_net)
fp16_glow_out = workspace.FetchBlob("glow_out")
if not np.allclose(fp16_glow_out, fp16_c2_out):
diff = np.abs(fp16_glow_out - fp16_c2_out)
print_test_debug_info(
"sls",
{
"indices": I,
"data": D,
"lengths": L,
"Y_c2": fp16_c2_out,
"Y_glow": fp16_glow_out,
"diff": diff,
"rowwise_diff": diff[:, 0],
},
)
assert 0
@given(seed=st.integers(0, 65535))
@settings(deadline=datetime.timedelta(seconds=10))
def test_slws_fused_8bit_rowwise_all_same(self, seed):
# Comment out for predictable debugging
np.random.seed(seed)
workspace.ResetWorkspace()
n = 1
m = 2
data = np.ones((n, m)).astype(np.float32) * 0.2 - 0.1
max_segments = 5
max_segment_length = 200
num_lengths = np.random.randint(1, max_segments + 1)
# number of segments to run
lengths = np.random.randint(0, max_segment_length + 1, size=num_lengths).astype(
np.int32
)
num_indices = np.sum(lengths)
indices = np.zeros(num_indices, dtype=np.int64)
weights = np.random.uniform(low=-0.5, high=0.5, size=[len(indices)]).astype(
np.float32
)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwise",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
ref_net.external_output.append("Y")
ref_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwiseFakeFP16NNPI",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
workspace.FeedBlob("data", data)
workspace.RunOperatorOnce(
core.CreateOperator(
"FloatToFused8BitRowwiseQuantized", ["data"], ["quantized_data"]
)
)
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{},
max_batch_size=max_segments,
max_seq_size=max_segment_length,
debug=True,
adjust_batch=True,
use_onnx=False,
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op
)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("lengths", lengths)
workspace.FeedBlob("weights", weights)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(ref_net)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
workspace.RunNet(ref_net.name)
Y_c2 = workspace.FetchBlob("Y")
if not np.allclose(Y_c2, Y_glow):
print_test_debug_info(
"slws_fused_8bit_rowwise",
{
"seed": seed,
"indices": indices,
"data": data,
"lengths": lengths,
"weights": weights,
"Y_c2": Y_c2,
"Y_glow": Y_glow,
"diff": Y_glow - Y_c2,
"rowwise_diff": (Y_glow - Y_c2)[:, 0],
},
)
assert 0
@given(
seed=st.integers(0, 65535),
num_rows=st.integers(2, 20),
embedding_dim=st.sampled_from([8, 12, 16, 24, 32, 54, 64, 128]),
batch_size=st.integers(1, 5),
max_weight=st.integers(0, 100),
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_slws_fused_8bit_rowwise(self, seed, num_rows, embedding_dim, batch_size, max_weight):
np.random.seed(seed)
workspace.ResetWorkspace()
data = np.random.rand(num_rows, embedding_dim).astype(np.float32)
lengths = np.random.choice(np.arange(1, num_rows), batch_size).astype(np.int32)
_indices = []
for length in lengths:
_indices.extend(np.random.choice(np.arange(1, num_rows), length))
indices = np.asarray(_indices).astype(np.int64)
weights = np.random.uniform(
low=0,
high=max_weight,
size=[len(indices)]
).astype(np.float32)
assert(len(weights) < 64000)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwise",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
ref_net.external_output.append("Y")
ref_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwiseFakeFP16NNPI",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
workspace.FeedBlob("data", data)
workspace.RunOperatorOnce(
core.CreateOperator(
"FloatToFused8BitRowwiseQuantized", ["data"], ["quantized_data"]
)
)
onnxified_net = onnxifi_caffe2_net(
pred_net,
{},
max_batch_size=batch_size,
max_seq_size=np.max(lengths),
debug=True,
adjust_batch=True,
use_onnx=False,
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in onnxified_net.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("lengths", lengths)
workspace.FeedBlob("weights", weights)
workspace.CreateNet(onnxified_net)
workspace.CreateNet(ref_net)
workspace.RunNet(onnxified_net.name)
Y_glow = workspace.FetchBlob("Y")
workspace.RunNet(ref_net.name)
Y_ref = workspace.FetchBlob("Y")
diff = np.abs((Y_ref - Y_glow) / (Y_ref + 1e-8))
max_err = np.max(diff, axis=1)
num_offenders = (max_err > 0).sum()
if num_offenders > 0:
print_test_debug_info(
"slws_fused_8bit_rowwise_inv_scale",
{
"seed": seed,
"num_rows": num_rows,
"embedding_dim": embedding_dim,
"batch_size": batch_size,
"max_weight": max_weight,
"indices": indices,
"data": data.shape,
"lengths": lengths,
"weights": weights,
"Y_glow": Y_glow,
"Y_ref": Y_ref,
"diff": diff,
"rowwise_diff": np.max(diff, axis=1),
},
)
assert 0
# Simple test to aid debugging order of operations
# Minimize the case to an SLS that adds two rows
@given(seed=st.integers(0, 65535))
@settings(deadline=datetime.timedelta(seconds=10))
def test_small_sls(self, seed):
np.random.seed(seed)
workspace.ResetWorkspace()
n = 2
DIM = 3
data = 4 * (np.random.random_sample((n, DIM)) + 1).astype(np.float32)
lengths = np.array([n], dtype=np.int32)
indices = np.array(range(n), dtype=np.int64)
weights = np.random.uniform(low=0.01, high=0.5, size=[n]).astype(np.float32)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwise",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
ref_net.external_output.append("Y")
ref_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwiseFakeFP16NNPI",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
workspace.FeedBlob("data", data)
workspace.RunOperatorOnce(
core.CreateOperator(
"FloatToFused8BitRowwiseQuantized", ["data"], ["quantized_data"]
)
)
quantized_data = workspace.FetchBlob("quantized_data")
onnxified_net = onnxifi_caffe2_net(
pred_net,
{},
max_batch_size=1,
max_seq_size=n,
debug=True,
adjust_batch=True,
use_onnx=False,
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in onnxified_net.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("lengths", lengths)
workspace.FeedBlob("weights", weights)
workspace.CreateNet(onnxified_net)
workspace.CreateNet(ref_net)
workspace.RunNet(onnxified_net.name)
Y_glow = workspace.FetchBlob("Y")
workspace.RunNet(ref_net.name)
Y_ref = workspace.FetchBlob("Y")
diff = np.abs((Y_ref - Y_glow) / (Y_ref + 1e-8))
max_err = np.max(diff, axis=1)
num_offenders = (max_err > 0).sum()
if num_offenders > 0:
np.set_printoptions(precision=12)
print(
"ref",
Y_ref.astype(np.float16).astype(np.float32),
"glow",
Y_glow.astype(np.float16).astype(np.float32),
)
print_test_debug_info(
"slws_fused_8bit_rowwise_inv_scale",
{
"seed": seed,
"indices": indices,
"data": data,
"quantized_data": quantized_data,
"lengths": lengths,
"weights": weights,
"Y_glow": Y_glow,
"Y_ref": Y_ref,
"diff": diff,
"rowwise_diff": np.max(diff, axis=1),
},
)
assert 0
@given(seed=st.integers(0, 65535))
@settings(deadline=datetime.timedelta(seconds=10))
def test_sls_layernorm(self, seed):
np.random.seed(seed)
workspace.ResetWorkspace()
n = 2
DIM = 3
data = 4 * (np.random.random_sample((n, DIM)) + 1).astype(np.float32)
lengths = np.array([n], dtype=np.int32)
indices = np.array(range(n), dtype=np.int64)
weights = np.random.uniform(low=0.01, high=0.5, size=[n]).astype(np.float32)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
pred_net.external_output.append("Y_norm")
pred_net.external_output.append("Y_mean")
pred_net.external_output.append("Y_std")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwise",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
pred_net.op.add().CopyFrom(
core.CreateOperator(
"LayerNorm",
["Y"],
["Y_norm", "Y_mean", "Y_std"],
epsilon=1e-4,
)
)
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"]
)
ref_net.external_output.append("Y_norm")
ref_net.external_output.append("Y_mean")
ref_net.external_output.append("Y_std")
ref_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused8BitRowwiseFakeFP16NNPI",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
ref_net.op.add().CopyFrom(
core.CreateOperator(
"LayerNormFakeFP16NNPI",
["Y"],
["Y_norm", "Y_mean", "Y_std"],
epsilon=1e-4,
axis=1,
elementwise_affine=False
)
)
workspace.FeedBlob("data", data)
workspace.RunOperatorOnce(
core.CreateOperator(
"FloatToFused8BitRowwiseQuantized", ["data"], ["quantized_data"]
)
)
quantized_data = workspace.FetchBlob("quantized_data")
onnxified_net = onnxifi_caffe2_net(
pred_net,
{},
max_batch_size=1,
max_seq_size=n,
debug=True,
adjust_batch=True,
use_onnx=False,
)
print("before", pred_net)
print("after", onnxified_net)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("lengths", lengths)
workspace.FeedBlob("weights", weights)
workspace.CreateNet(onnxified_net)
workspace.CreateNet(ref_net)
workspace.RunNet(onnxified_net.name)
Y_glow = workspace.FetchBlob("Y_norm")
Y_mean_glow = workspace.FetchBlob("Y_mean")
Y_std_glow = workspace.FetchBlob("Y_std")
workspace.RunNet(ref_net.name)
Y = workspace.FetchBlob("Y")
print("pre normalization", Y)
Y_ref = workspace.FetchBlob("Y_norm")
Y_mean_ref = workspace.FetchBlob("Y_mean")
Y_std_ref = workspace.FetchBlob("Y_std")
# print(Y_ref, Y_glow)
# print(Y_ref.shape, Y_glow.shape)
diff = np.abs(Y_ref - Y_glow)
max_err = np.max(diff, axis=1)
num_offenders = (max_err > 0).sum()
if num_offenders > 0:
np.set_printoptions(precision=12)
print(
"ref",
Y_ref.astype(np.float16).astype(np.float32),
"glow",
Y_glow.astype(np.float16).astype(np.float32),
)
print_test_debug_info(
"slws_fused_8bit_rowwise_inv_scale",
{
"seed": seed,
"indices": indices,
"data": data,
"quantized_data": quantized_data,
"lengths": lengths,
"weights": weights,
"Y_norm_glow": Y_glow,
"Y_norm_ref": Y_ref,
"Y_mean_glow": Y_mean_glow,
"Y_std_glow": Y_std_glow,
"Y_mean_ref": Y_mean_ref,
"Y_std_ref": Y_std_ref,
"diff": diff,
"rowwise_diff": np.max(diff, axis=1),
},
)
assert 0
if __name__ == '__main__':
unittest.main()

# --- end of caffe2/contrib/fakelowp/test/test_sls_8bit_nnpi_fp16.py (pytorch-master) ---

# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import settings
workspace.GlobalInit(
[
"caffe2",
"--glow_global_fp16=0",
"--glow_global_fused_scale_offset_fp16=0",
"--glow_global_force_sls_fp16_accum=0",
]
)
class QuantTest(serial.SerializedTestCase):
@settings(deadline=datetime.timedelta(seconds=10))
def test_dequantize(self):
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.append("X")
pred_net.external_output.append("Y")
x_scale = 0.10000000149011612
pred_net.op.add().CopyFrom(
core.CreateOperator(
"Int8Quantize", ["X"], ["I"], Y_scale=x_scale, Y_zero_point=0
)
)
pred_net.op.add().CopyFrom(
core.CreateOperator(
"Int8Dequantize", ["I"], ["Y"],
)
)
print(pred_net)
X = np.asarray([[1, 0], [0, 1]]).astype(np.float32)
workspace.FeedBlob("X", X)
workspace.CreateNet(pred_net)
workspace.RunNet(pred_net.name)
Y_ref = workspace.FetchBlob("Y")
workspace.ResetWorkspace()
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{"X": [5, 2]},
debug=True,
adjust_batch=True,
block_list=[0],
use_onnx=False,
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op
)
np.testing.assert_equal(len(pred_net_onnxified.op), 2)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("X", X)
workspace.CreateNet(pred_net_onnxified)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
np.testing.assert_equal(Y_ref, Y_glow)
@settings(deadline=datetime.timedelta(seconds=20))
def test_quantize(self):
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.append("X")
pred_net.external_output.append("Y")
x_scale = 0.10000000149011612
pred_net.op.add().CopyFrom(
core.CreateOperator(
"Int8Quantize", ["X"], ["Y"], Y_scale=x_scale, Y_zero_point=0
)
)
print(pred_net)
X = np.asarray([[1, 0], [0, 1]]).astype(np.float32)
workspace.FeedBlob("X", X)
workspace.RunNetOnce(pred_net)
Y_ref = workspace.FetchInt8Blob("Y")
workspace.ResetWorkspace()
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{"X": [2, 2]},
debug=True,
adjust_batch=False,
use_onnx=False,
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op
)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("X", X)
workspace.CreateNet(pred_net_onnxified)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchInt8Blob("Y")
np.testing.assert_equal(Y_ref.data, Y_glow.data)
|
pytorch-master
|
caffe2/contrib/fakelowp/test/test_int8_quant.py
|
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
import datetime
import numpy as np
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
workspace.GlobalInit(
[
"caffe2",
"--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
"--glow_global_force_sls_fp16_accum=1",
]
)
class Fusions(serial.SerializedTestCase):
@given(
scale=st.floats(1e-4, 1e2),
zp=st.integers(-128, 128),
size=st.integers(1, 100000),
rand_seed=st.integers(0, 65534),
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_tanhquantize(self, scale, zp, size, rand_seed):
np.random.seed(rand_seed)
workspace.ResetWorkspace()
pred_net = caffe2_pb2.NetDef()
pred_net.name = "ref"
pred_net.external_input.append("X")
pred_net.external_output.append("Y_q")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"Tanh", ["X"], ["Y"]
)
)
pred_net.op.add().CopyFrom(
core.CreateOperator(
"Int8Quantize", ["Y"], ["Y_q"], Y_scale=scale, Y_zero_point=zp
)
)
X = np.linspace(-1, 1, size).astype(np.float16).astype(np.float32)
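        # Round-tripping through float16 keeps the inputs exactly representable
        # in fp16, so the Glow path and the fp16-emulating reference below start
        # from identical values.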
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{"X": X.shape},
debug=True,
adjust_batch=False,
use_onnx=False,
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op
)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("X", X)
workspace.CreateNet(pred_net_onnxified)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchInt8Blob("Y_q")
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.append("X")
ref_net.external_output.append("Y_q")
ref_net.op.add().CopyFrom(
core.CreateOperator(
"TanhQuantFakeFp16NNPI", ["X"], ["Y_q"], Y_scale=scale, Y_zero_point=zp
)
)
workspace.CreateNet(ref_net)
workspace.RunNet(ref_net.name)
Y_ref = workspace.FetchInt8Blob("Y_q")
if not np.array_equal(Y_ref.data, Y_glow.data) or \
not Y_ref.scale == Y_glow.scale or \
not Y_ref.zero_point == Y_glow.zero_point:
print_test_debug_info(
"tanhfusion",
{
"scale": scale,
"zp": zp,
"input": X,
"ideal nonquant": np.tanh(X),
"Y_glow": Y_glow,
"Y_c2": Y_ref,
}
)
assert(0)
|
pytorch-master
|
caffe2/contrib/fakelowp/test/test_fusions.py
|
pytorch-master
|
caffe2/contrib/script/__init__.py
|
|
pytorch-master
|
caffe2/contrib/script/examples/__init__.py
|
|
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, dyndep, workspace
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/prof:cuda_profile_ops")
class CudaProfileOpsTest(unittest.TestCase):
@unittest.skipIf(workspace.NumCudaDevices() < 1, "Need at least 1 GPU")
def test_run(self):
net = core.Net("net")
net.CudaProfileInitialize([], [], output="/tmp/cuda_profile_test")
net.CudaProfileStart([], [])
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
net.ConstantFill([], ["out"], shape=[1, 3, 244, 244])
net.CudaProfileStop([], [])
workspace.CreateNet(net)
workspace.RunNet(net)
|
pytorch-master
|
caffe2/contrib/prof/cuda_profile_ops_test.py
|
pytorch-master
|
caffe2/contrib/prof/__init__.py
|
|
pytorch-master
|
caffe2/contrib/tensorboard/__init__.py
|
|
from builtins import bytes
import copy
import logging
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
try:
# tensorboard>=1.14.0
from tensorboard.compat.proto import tensor_shape_pb2
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.graph_pb2 import GraphDef
except ImportError:
from tensorflow.core.framework import tensor_shape_pb2
try:
# tensorflow>=1.0.0
from tensorflow import NodeDef, GraphDef
except ImportError:
# tensorflow<=0.12.1
from tensorflow.core.framework.graph_pb2 import NodeDef, GraphDef
def _make_unique_name(seen, name, min_version=0):
assert name is not None
i = min_version
x = '%s_%d' % (name, i) if i else name
while x in seen:
i += 1
x = '%s_%d' % (name, i)
seen.add(x)
return x
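# A small illustration of the helper above (the names are hypothetical, not part
# of the exporter API):
#   seen = set()
#   _make_unique_name(seen, 'foo')                 # -> 'foo'
#   _make_unique_name(seen, 'foo')                 # -> 'foo_1'
#   _make_unique_name(seen, 'foo', min_version=1)  # -> 'foo_2'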
def _convert_to_ssa(shapes, track_blob_names, ops):
"""
    Convert an operator graph to SSA (i.e. out-of-place), renaming blobs so
    that each blob is produced only once.
"""
ir = core.IR(ops)
seen = set()
versioned = {}
shapes2 = {}
track_blob_names2 = {}
def ssa_name(name, versions):
assert name in versions
version = versions[name]
if (name, version) in versioned:
return versioned[(name, version)]
# Always setting name2 = `{name}_{version}` would work, but we also try
# to avoid a trailing `_0`, so we have to be careful not to introduce
# name collisions, such as (foo_1, 0) = foo_1 = (foo, 1).
# Note: operator names (if any) will be handled later.
name2 = _make_unique_name(seen, name, min_version=version)
versioned[(name, version)] = name2
# Transfer shape.
if name in shapes:
shapes2[name2] = shapes[name]
if track_blob_names and name in track_blob_names:
track_blob_names2[name2] = track_blob_names[name]
return name2
for (op, ssa) in zip(ops, ir.ssa):
assert op is ssa.op
inputs = list(op.input)
outputs = list(op.output)
del op.input[:]
del op.output[:]
op.input.extend(ssa_name(name, ssa.in_versions) for name in inputs)
op.output.extend(ssa_name(name, ssa.out_versions) for name in outputs)
shapes.clear()
shapes.update(shapes2)
if track_blob_names:
track_blob_names.clear()
track_blob_names.update(track_blob_names2)
def _get_blob_names(ops):
names = set()
for op in ops:
names.update(op.input)
names.update(op.output)
return {name: name for name in names}
def _remap_keys(m, f):
m2 = {f(key): value for key, value in m.items()}
m.clear()
m.update(m2)
def _rename_all(shapes, track_blob_names, ops, f):
seen = set()
renamed = {}
def g(name):
""" Collision-free version of f.
"""
if name is None:
return None
if name in renamed:
return renamed[name]
name2 = _make_unique_name(seen, f(name))
renamed[name] = name2
return name2
for op in ops:
inputs = list(op.input)
outputs = list(op.output)
del op.input[:]
del op.output[:]
op.input.extend(g(name) for name in inputs)
op.output.extend(g(name) for name in outputs)
_remap_keys(shapes, g)
if track_blob_names:
_remap_keys(track_blob_names, g)
# Rename all operator names (if any) independently so that the
# unique-fication happens only once in _fill_missing_operator_names().
seen.clear()
renamed.clear()
for op in ops:
op.name = g(op.name)
def _add_gradient_scope(shapes, track_blob_names, ops):
"""
For all operators or blobs with name containing "_grad", add a
"GRADIENTS/" scope.
Note: breaks graph execution since the blob -> gradient mapping is
hardcoded.
"""
def f(name):
if '_grad' in name:
return 'GRADIENTS/{}'.format(name)
else:
return name
_rename_all(shapes, track_blob_names, ops, f)
def _replace_colons(shapes, track_blob_names, ops, repl):
"""
`:i` has a special meaning in Tensorflow.
"""
def f(name):
return name.replace(':', repl)
_rename_all(shapes, track_blob_names, ops, f)
def _fill_missing_operator_names(ops):
''' Give missing operators a name.
We expect C2 operators to be generally unnamed. This gives them a scope
(inferred from their outputs) and a name after their type. Duplicates will
be postfixed by an index.
'''
seen = set()
for op in ops:
# Make sure operator names don't collide with blobs.
seen.update(op.input)
seen.update(op.output)
for op in ops:
if op.name:
name = op.name
elif op.output or op.input:
l = [os.path.dirname(name) for name in op.output or op.input]
scope = os.path.commonprefix(l)
name = os.path.join(scope, op.type)
else:
name = op.type
assert(name)
op.name = _make_unique_name(seen, name)
def _tf_device(device_option):
if not device_option.HasField("device_type"):
return ""
if device_option.device_type == caffe2_pb2.CPU:
return "/cpu:*"
if device_option.device_type == caffe2_pb2.CUDA:
return "/gpu:{}".format(device_option.device_id)
raise Exception("Unhandled device", device_option)
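# For reference, the mapping implemented above:
#   device_type CPU                 -> "/cpu:*"
#   device_type CUDA, device_id 1   -> "/gpu:1"
#   device_type not set             -> ""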
def _add_tf_shape(m, ints):
sh = tensor_shape_pb2.TensorShapeProto()
for i in ints:
dim = tensor_shape_pb2.TensorShapeProto.Dim()
dim.size = i
sh.dim.extend([dim])
m['_output_shapes'].list.shape.extend([sh])
def _set_tf_attr(m, arg):
k = arg.name
if k == 'shape' and arg.ints:
_add_tf_shape(m, arg.ints)
return
if arg.HasField("f"):
m[k].f = arg.f
return
if arg.HasField("i"):
m[k].i = arg.i
return
if arg.HasField("s"):
m[k].s = (
arg.s if isinstance(arg.s, bytes) else str(arg.s).encode('utf-8')
)
return
if arg.floats:
m[k].list.f.extend(arg.floats)
return
if arg.ints:
m[k].list.i.extend(arg.ints)
return
if arg.strings:
m[k].list.s.extend(
s if isinstance(s, bytes) else str(s).encode('utf-8')
for s in arg.strings
)
return
# The value is an empty list.
m[k].list.s.extend([])
def _operator_to_node(shapes, op):
assert op.name, op
n = NodeDef()
n.name = op.name
n.input.extend(op.input)
n.op = op.type
n.device = _tf_device(op.device_option)
if shapes:
# Add shapes in order.
for output in op.output:
if output not in shapes:
break
_add_tf_shape(n.attr, shapes[output])
for arg in op.arg:
_set_tf_attr(n.attr, arg)
return n
def _blob_to_node(producing_ops, shapes, name):
assert name
n = NodeDef()
n.name = name
inputs = producing_ops.get(name, [])
if inputs:
n.op = 'Blob'
else:
n.op = 'Placeholder'
n.input.extend('%s:%d' % (op.name, i) for op, i in inputs)
if inputs:
device = inputs[0][0].device_option
if (all(input[0].device_option == device for input in inputs)):
n.device = _tf_device(device)
if shapes and name in shapes:
_add_tf_shape(n.attr, shapes[name])
return n
def _operators_to_graph_def(
shapes,
ops,
replace_colons='$',
with_ssa=True,
with_gradient_scope=True,
    track_blob_names=None,  # pass an empty dict to track blob names
):
if track_blob_names is not None:
track_blob_names.clear()
track_blob_names.update(_get_blob_names(ops))
if replace_colons:
_replace_colons(shapes, track_blob_names, ops, replace_colons)
if with_ssa:
_convert_to_ssa(shapes, track_blob_names, ops)
if with_gradient_scope:
_add_gradient_scope(shapes, track_blob_names, ops)
_fill_missing_operator_names(ops)
g = GraphDef()
producing_ops = {}
blobs = set()
for op in ops:
g.node.extend([_operator_to_node(shapes, op)])
for input_blob in op.input:
blobs.add(input_blob)
for i, output_blob in enumerate(op.output):
blobs.add(output_blob)
producing_ops.setdefault(output_blob, []).append((op, i))
for blob in blobs:
g.node.extend([_blob_to_node(producing_ops, shapes, blob)])
return g
def _propagate_device_option(net):
if not net.HasField("device_option"):
return
for op in net.op:
if not op.HasField("device_option"):
op.device_option.CopyFrom(net.device_option)
def _try_get_shapes(nets):
try:
# Note: this will inspect the workspace for better or worse.
shapes, _ = workspace.InferShapesAndTypes(nets)
return shapes
except Exception as e:
logging.warning('Failed to compute shapes: %s', e)
return {}
def nets_to_graph_def(nets, shapes=None, **kwargs):
if shapes is None:
shapes = _try_get_shapes(nets)
nets = [copy.deepcopy(net.Proto()) for net in nets]
shapes = copy.deepcopy(shapes)
for net in nets:
_propagate_device_option(net)
return _operators_to_graph_def(
shapes,
[op for net in nets for op in net.op],
**kwargs
)
def cnn_to_graph_def(cnn, **kwargs):
return nets_to_graph_def([cnn.param_init_net, cnn.net], **kwargs)
def ops_to_graph_def(ops, shapes=None, **kwargs):
ops = copy.deepcopy(ops)
shapes = copy.deepcopy(shapes or {})
return _operators_to_graph_def(shapes, ops, **kwargs)
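# Minimal usage sketch (assumes a caffe2 ModelHelper named `model` has already
# been built; the names and path are illustrative only):
#
#   graph_def = nets_to_graph_def([model.param_init_net, model.net])
#   with open('/tmp/graph.pbtxt', 'w') as f:
#       f.write(str(graph_def))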
|
pytorch-master
|
caffe2/contrib/tensorboard/tensorboard_exporter.py
|
import click.testing
import numpy as np
import os
import tempfile
import unittest
from caffe2.python import brew, core, model_helper
import caffe2.contrib.tensorboard.tensorboard as tb
import caffe2.contrib.tensorboard.tensorboard_exporter as tb_exporter
try:
# tensorboard>=1.14.0
from tensorboard.compat.proto.graph_pb2 import GraphDef
except ImportError:
from tensorflow import GraphDef
def load_events(filename):
try:
# tensorboard>=1.14.0
from tensorboard.backend.event_processing import event_file_loader
loader = event_file_loader.EventFileLoader(filename)
return list(loader.Load())
except ImportError:
import tensorflow as tf
return list(tf.train.summary_iterator(filename))
class TensorboardTest(unittest.TestCase):
def test_events(self):
runner = click.testing.CliRunner()
c2_dir = tempfile.mkdtemp()
np.random.seed(1701)
n_iters = 2
blobs = ["w", "b"]
data = np.random.randn(len(blobs), n_iters, 10)
for i, blob in enumerate(blobs):
with open(os.path.join(c2_dir, blob), "w") as f:
for row in data[i]:
stats = [row.min(), row.max(), row.mean(), row.std()]
f.write(" ".join(str(s) for s in stats) + "\n")
# Test error handling path
with open(os.path.join(c2_dir, "not-a-summary"), "w") as f:
f.write("not-a-summary")
tf_dir = tempfile.mkdtemp()
result = runner.invoke(
tb.cli,
["tensorboard-events", "--c2-dir", c2_dir, "--tf-dir", tf_dir])
self.assertEqual(result.exit_code, 0)
entries = list(os.walk(tf_dir))
self.assertEqual(len(entries), 1)
((d, _, (fname,)),) = entries
self.assertEqual(tf_dir, d)
events = load_events(os.path.join(tf_dir, fname))
self.assertEqual(len(events), n_iters + 1)
events = events[1:]
self.maxDiff = None
self.assertEqual(len(events), 2)
def test_tensorboard_graphs(self):
model = model_helper.ModelHelper(name="overfeat")
data, label = brew.image_input(
model, ["db"], ["data", "label"], is_test=0
)
with core.NameScope("conv1"):
conv1 = brew.conv(model, data, "conv1", 3, 96, 11, stride=4)
relu1 = brew.relu(model, conv1, conv1)
pool1 = brew.max_pool(model, relu1, "pool1", kernel=2, stride=2)
with core.NameScope("classifier"):
fc = brew.fc(model, pool1, "fc", 4096, 1000)
pred = brew.softmax(model, fc, "pred")
xent = model.LabelCrossEntropy([pred, label], "xent")
loss = model.AveragedLoss(xent, "loss")
model.AddGradientOperators([loss], skip=1)
c2_dir = tempfile.mkdtemp()
tf_dir = tempfile.mkdtemp()
with open(os.path.join(c2_dir, "init"), "w") as f:
f.write(str(model.param_init_net.Proto()))
with open(os.path.join(c2_dir, "net"), "w") as f:
f.write(str(model.net.Proto()))
runner = click.testing.CliRunner()
result = runner.invoke(
tb.cli,
["tensorboard-graphs",
"--c2-netdef", os.path.join(c2_dir, "init"),
"--c2-netdef", os.path.join(c2_dir, "net"),
"--tf-dir", tf_dir])
self.assertEqual(result.exit_code, 0)
entries = list(os.walk(tf_dir))
self.assertEqual(len(entries), 1)
((d, _, (fname,)),) = entries
self.assertEqual(tf_dir, d)
events = load_events(os.path.join(tf_dir, fname))
self.assertEqual(len(events), 3)
events = events[1:]
nets = [model.param_init_net, model.net]
for i, (event, net) in enumerate(zip(events, nets), start=1):
self.assertEqual(event.step, i)
self.assertEqual(event.wall_time, i)
g = GraphDef()
g.ParseFromString(event.graph_def)
self.assertMultiLineEqual(
str(g),
str(tb_exporter.nets_to_graph_def([net])))
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/contrib/tensorboard/tensorboard_test.py
|
import click
import collections
import logging
import numpy as np
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core
import caffe2.contrib.tensorboard.tensorboard_exporter as tb_exporter
try:
# tensorboard>=1.14.0
from tensorboard.compat.proto.summary_pb2 import Summary, HistogramProto
from tensorboard.compat.proto.event_pb2 import Event
from tensorboard.summary.writer.event_file_writer import EventFileWriter as FileWriter
except ImportError:
from tensorflow.core.framework.summary_pb2 import Summary, HistogramProto
from tensorflow.core.util.event_pb2 import Event
try:
# tensorflow>=1.0.0
from tensorflow.summary import FileWriter
except ImportError:
# tensorflow<=0.12.1
from tensorflow.train import SummaryWriter as FileWriter
class Config(object):
HEIGHT = 600
ASPECT_RATIO = 1.6
CODE_TEMPLATE = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import"
href="https://tensorboard.appspot.com/tf-graph-basic.build.html"
onload=load()
>
<div style="height:{height}px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
"""
IFRAME_TEMPLATE = """
<iframe
seamless
style="width:{width}px;height:{height}px;border:0"
srcdoc="{code}">
</iframe>
"""
def _show_graph(graph_def):
import IPython.display
code = CODE_TEMPLATE.format(
data=repr(str(graph_def)),
id='graph' + str(np.random.rand()),
height=Config.HEIGHT)
iframe = IFRAME_TEMPLATE.format(
        code=code.replace('"', '&quot;'),
width=Config.HEIGHT * Config.ASPECT_RATIO,
height=Config.HEIGHT + 20)
IPython.display.display(IPython.display.HTML(iframe))
def visualize_cnn(cnn, **kwargs):
g = tb_exporter.cnn_to_graph_def(cnn, **kwargs)
_show_graph(g)
def visualize_net(nets, **kwargs):
g = tb_exporter.nets_to_graph_def(nets, **kwargs)
_show_graph(g)
def visualize_ops(ops, **kwargs):
g = tb_exporter.ops_to_graph_def(ops, **kwargs)
_show_graph(g)
@click.group()
def cli():
pass
def write_events(tf_dir, events):
writer = FileWriter(tf_dir, len(events))
for event in events:
writer.add_event(event)
writer.flush()
writer.close()
def graph_def_to_event(step, graph_def):
return Event(
wall_time=step, step=step, graph_def=graph_def.SerializeToString())
@cli.command("tensorboard-graphs")
@click.option("--c2-netdef", type=click.Path(exists=True, dir_okay=False),
multiple=True)
@click.option("--tf-dir", type=click.Path(exists=True))
def tensorboard_graphs(c2_netdef, tf_dir):
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def parse_net_def(path):
import google.protobuf.text_format # type: ignore[import]
net_def = caffe2_pb2.NetDef()
with open(path) as f:
google.protobuf.text_format.Merge(f.read(), net_def)
return core.Net(net_def)
graph_defs = [tb_exporter.nets_to_graph_def([parse_net_def(path)])
for path in c2_netdef]
events = [graph_def_to_event(i, graph_def)
for (i, graph_def) in enumerate(graph_defs, start=1)]
write_events(tf_dir, events)
log.info("Wrote %s graphs to logdir %s", len(events), tf_dir)
@cli.command("tensorboard-events")
@click.option("--c2-dir", type=click.Path(exists=True, file_okay=False),
help="Root directory of the Caffe2 run")
@click.option("--tf-dir", type=click.Path(writable=True),
help="Output path to the logdir used by TensorBoard")
def tensorboard_events(c2_dir, tf_dir):
np.random.seed(1701)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
S = collections.namedtuple('S', ['min', 'max', 'mean', 'std'])
def parse_summary(filename):
try:
with open(filename) as f:
rows = [(float(el) for el in line.split()) for line in f]
return [S(*r) for r in rows]
except Exception as e:
log.exception(e)
return None
def get_named_summaries(root):
summaries = [
(fname, parse_summary(os.path.join(dirname, fname)))
for dirname, _, fnames in os.walk(root)
for fname in fnames
]
return [(n, s) for (n, s) in summaries if s]
def inferred_histo(summary, samples=1000):
np.random.seed(
hash(
summary.std + summary.mean + summary.min + summary.max
) % np.iinfo(np.int32).max
)
samples = np.random.randn(samples) * summary.std + summary.mean
samples = np.clip(samples, a_min=summary.min, a_max=summary.max)
(hist, edges) = np.histogram(samples)
upper_edges = edges[1:]
r = HistogramProto(
min=summary.min,
max=summary.max,
num=len(samples),
sum=samples.sum(),
sum_squares=(samples * samples).sum())
r.bucket_limit.extend(upper_edges)
r.bucket.extend(hist)
return r
def named_summaries_to_events(named_summaries):
names = [n for (n, _) in named_summaries]
summaries = [s for (_, s) in named_summaries]
summaries = list(zip(*summaries))
def event(step, values):
s = Summary()
scalar = [
Summary.Value(
tag="{}/{}".format(name, field),
simple_value=v)
for name, value in zip(names, values)
for field, v in value._asdict().items()]
hist = [
Summary.Value(
tag="{}/inferred_normal_hist".format(name),
histo=inferred_histo(value))
for name, value in zip(names, values)
]
s.value.extend(scalar + hist)
return Event(wall_time=int(step), step=step, summary=s)
return [event(step, values)
for step, values in enumerate(summaries, start=1)]
named_summaries = get_named_summaries(c2_dir)
events = named_summaries_to_events(named_summaries)
write_events(tf_dir, events)
log.info("Wrote %s events to logdir %s", len(events), tf_dir)
if __name__ == "__main__":
cli()
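# Example invocations (all paths are placeholders):
#   python -m caffe2.contrib.tensorboard.tensorboard tensorboard-graphs \
#       --c2-netdef init_net.pbtxt --c2-netdef predict_net.pbtxt --tf-dir /tmp/logdir
#   python -m caffe2.contrib.tensorboard.tensorboard tensorboard-events \
#       --c2-dir /path/to/c2/run --tf-dir /tmp/logdir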
|
pytorch-master
|
caffe2/contrib/tensorboard/tensorboard.py
|
import unittest
from caffe2.proto import caffe2_pb2
import caffe2.python.cnn as cnn
import caffe2.python.core as core
import caffe2.contrib.tensorboard.tensorboard_exporter as tb
EXPECTED = """
node {
name: "conv1/XavierFill"
op: "XavierFill"
device: "/gpu:0"
attr {
key: "_output_shapes"
value {
list {
shape {
dim {
size: 96
}
dim {
size: 3
}
dim {
size: 11
}
dim {
size: 11
}
}
}
}
}
}
node {
name: "conv1/ConstantFill"
op: "ConstantFill"
device: "/gpu:0"
attr {
key: "_output_shapes"
value {
list {
shape {
dim {
size: 96
}
}
}
}
}
}
node {
name: "classifier/XavierFill"
op: "XavierFill"
device: "/gpu:0"
attr {
key: "_output_shapes"
value {
list {
shape {
dim {
size: 1000
}
dim {
size: 4096
}
}
}
}
}
}
node {
name: "classifier/ConstantFill"
op: "ConstantFill"
device: "/gpu:0"
attr {
key: "_output_shapes"
value {
list {
shape {
dim {
size: 1000
}
}
}
}
}
}
node {
name: "ImageInput"
op: "ImageInput"
input: "db"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "is_test"
value {
i: 0
}
}
attr {
key: "use_cudnn"
value {
i: 1
}
}
}
node {
name: "NHWC2NCHW"
op: "NHWC2NCHW"
input: "data_nhwc"
device: "/gpu:0"
}
node {
name: "conv1/Conv"
op: "Conv"
input: "data"
input: "conv1/conv1_w"
input: "conv1/conv1_b"
device: "/gpu:0"
attr {
key: "exhaustive_search"
value {
i: 0
}
}
attr {
key: "kernel"
value {
i: 11
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "stride"
value {
i: 4
}
}
}
node {
name: "conv1/Relu"
op: "Relu"
input: "conv1/conv1"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
}
node {
name: "conv1/MaxPool"
op: "MaxPool"
input: "conv1/conv1_1"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "kernel"
value {
i: 2
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "stride"
value {
i: 2
}
}
}
node {
name: "classifier/FC"
op: "FC"
input: "conv1/pool1"
input: "classifier/fc_w"
input: "classifier/fc_b"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "use_cudnn"
value {
i: 1
}
}
}
node {
name: "classifier/Softmax"
op: "Softmax"
input: "classifier/fc"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
}
node {
name: "classifier/LabelCrossEntropy"
op: "LabelCrossEntropy"
input: "classifier/pred"
input: "label"
device: "/gpu:0"
}
node {
name: "classifier/AveragedLoss"
op: "AveragedLoss"
input: "classifier/xent"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/ConstantFill"
op: "ConstantFill"
input: "classifier/loss"
device: "/gpu:0"
attr {
key: "value"
value {
f: 1.0
}
}
}
node {
name: "GRADIENTS/classifier/AveragedLossGradient"
op: "AveragedLossGradient"
input: "classifier/xent"
input: "GRADIENTS/classifier/loss_autogen_grad"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/LabelCrossEntropyGradient"
op: "LabelCrossEntropyGradient"
input: "classifier/pred"
input: "label"
input: "GRADIENTS/classifier/xent_grad"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/SoftmaxGradient"
op: "SoftmaxGradient"
input: "classifier/pred"
input: "GRADIENTS/classifier/pred_grad"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
}
node {
name: "GRADIENTS/c/FCGradient"
op: "FCGradient"
input: "conv1/pool1"
input: "classifier/fc_w"
input: "GRADIENTS/classifier/fc_grad"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "use_cudnn"
value {
i: 1
}
}
}
node {
name: "GRADIENTS/conv1/MaxPoolGradient"
op: "MaxPoolGradient"
input: "conv1/conv1_1"
input: "conv1/pool1"
input: "GRADIENTS/conv1/pool1_grad"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "kernel"
value {
i: 2
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "stride"
value {
i: 2
}
}
}
node {
name: "GRADIENTS/conv1/ReluGradient"
op: "ReluGradient"
input: "conv1/conv1_1"
input: "GRADIENTS/conv1/conv1_grad"
device: "/gpu:0"
attr {
key: "cudnn_exhaustive_search"
value {
i: 0
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
}
node {
name: "GRADIENTS/ConvGradient"
op: "ConvGradient"
input: "data"
input: "conv1/conv1_w"
input: "GRADIENTS/conv1/conv1_grad_1"
device: "/gpu:0"
attr {
key: "exhaustive_search"
value {
i: 0
}
}
attr {
key: "kernel"
value {
i: 11
}
}
attr {
key: "order"
value {
s: "NCHW"
}
}
attr {
key: "stride"
value {
i: 4
}
}
}
node {
name: "GRADIENTS/NCHW2NHWC"
op: "NCHW2NHWC"
input: "GRADIENTS/data_grad"
device: "/gpu:0"
}
node {
name: "conv1/conv1_w"
op: "Blob"
input: "conv1/XavierFill:0"
device: "/gpu:0"
}
node {
name: "classifier/fc"
op: "Blob"
input: "classifier/FC:0"
device: "/gpu:0"
}
node {
name: "data_nhwc"
op: "Blob"
input: "ImageInput:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/conv1/conv1_b_grad"
op: "Blob"
input: "GRADIENTS/ConvGradient:1"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/pred_grad"
op: "Blob"
input: "GRADIENTS/classifier/LabelCrossEntropyGradient:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/fc_grad"
op: "Blob"
input: "GRADIENTS/classifier/SoftmaxGradient:0"
device: "/gpu:0"
}
node {
name: "conv1/conv1_b"
op: "Blob"
input: "conv1/ConstantFill:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/fc_b_grad"
op: "Blob"
input: "GRADIENTS/c/FCGradient:1"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/fc_w_grad"
op: "Blob"
input: "GRADIENTS/c/FCGradient:0"
device: "/gpu:0"
}
node {
name: "label"
op: "Blob"
input: "ImageInput:1"
device: "/gpu:0"
}
node {
name: "GRADIENTS/data_grad"
op: "Blob"
input: "GRADIENTS/ConvGradient:2"
device: "/gpu:0"
}
node {
name: "classifier/loss"
op: "Blob"
input: "classifier/AveragedLoss:0"
device: "/gpu:0"
}
node {
name: "conv1/conv1"
op: "Blob"
input: "conv1/Conv:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/conv1/conv1_grad"
op: "Blob"
input: "GRADIENTS/conv1/MaxPoolGradient:0"
device: "/gpu:0"
}
node {
name: "classifier/xent"
op: "Blob"
input: "classifier/LabelCrossEntropy:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/loss_autogen_grad"
op: "Blob"
input: "GRADIENTS/classifier/ConstantFill:0"
device: "/gpu:0"
}
node {
name: "classifier/fc_w"
op: "Blob"
input: "classifier/XavierFill:0"
device: "/gpu:0"
}
node {
name: "conv1/conv1_1"
op: "Blob"
input: "conv1/Relu:0"
device: "/gpu:0"
}
node {
name: "db"
op: "Placeholder"
}
node {
name: "classifier/pred"
op: "Blob"
input: "classifier/Softmax:0"
device: "/gpu:0"
}
node {
name: "classifier/fc_b"
op: "Blob"
input: "classifier/ConstantFill:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/classifier/xent_grad"
op: "Blob"
input: "GRADIENTS/classifier/AveragedLossGradient:0"
device: "/gpu:0"
}
node {
name: "data"
op: "Blob"
input: "NHWC2NCHW:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/conv1/conv1_w_grad"
op: "Blob"
input: "GRADIENTS/ConvGradient:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/conv1/conv1_grad_1"
op: "Blob"
input: "GRADIENTS/conv1/ReluGradient:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/data_nhwc_grad"
op: "Blob"
input: "GRADIENTS/NCHW2NHWC:0"
device: "/gpu:0"
}
node {
name: "GRADIENTS/conv1/pool1_grad"
op: "Blob"
input: "GRADIENTS/c/FCGradient:2"
device: "/gpu:0"
}
node {
name: "conv1/pool1"
op: "Blob"
input: "conv1/MaxPool:0"
device: "/gpu:0"
}
"""
class TensorboardExporterTest(unittest.TestCase):
def test_that_operators_gets_non_colliding_names(self):
op = caffe2_pb2.OperatorDef()
op.type = 'foo'
op.input.extend(['foo'])
tb._fill_missing_operator_names([op])
self.assertEqual(op.input[0], 'foo')
self.assertEqual(op.name, 'foo_1')
def test_that_replacing_colons_gives_non_colliding_names(self):
# .. and update shapes
op = caffe2_pb2.OperatorDef()
op.name = 'foo:0'
op.input.extend(['foo:0', 'foo$0'])
shapes = {'foo:0': [1]}
track_blob_names = tb._get_blob_names([op])
tb._replace_colons(shapes, track_blob_names, [op], '$')
self.assertEqual(op.input[0], 'foo$0')
self.assertEqual(op.input[1], 'foo$0_1')
# Collision but blobs and op names are handled later by
# _fill_missing_operator_names.
self.assertEqual(op.name, 'foo$0')
self.assertEqual(len(shapes), 1)
self.assertEqual(shapes['foo$0'], [1])
self.assertEqual(len(track_blob_names), 2)
self.assertEqual(track_blob_names['foo$0'], 'foo:0')
self.assertEqual(track_blob_names['foo$0_1'], 'foo$0')
def test_that_adding_gradient_scope_does_no_fancy_renaming(self):
# because it cannot create collisions
op = caffe2_pb2.OperatorDef()
op.name = 'foo_grad'
op.input.extend(['foo_grad', 'foo_grad_1'])
shapes = {'foo_grad': [1]}
track_blob_names = tb._get_blob_names([op])
tb._add_gradient_scope(shapes, track_blob_names, [op])
self.assertEqual(op.input[0], 'GRADIENTS/foo_grad')
self.assertEqual(op.input[1], 'GRADIENTS/foo_grad_1')
self.assertEqual(op.name, 'GRADIENTS/foo_grad')
self.assertEqual(len(shapes), 1)
self.assertEqual(shapes['GRADIENTS/foo_grad'], [1])
self.assertEqual(len(track_blob_names), 2)
self.assertEqual(
track_blob_names['GRADIENTS/foo_grad'], 'foo_grad')
self.assertEqual(
track_blob_names['GRADIENTS/foo_grad_1'], 'foo_grad_1')
def test_that_auto_ssa_gives_non_colliding_names(self):
op1 = caffe2_pb2.OperatorDef()
op1.output.extend(['foo'])
op2 = caffe2_pb2.OperatorDef()
op2.input.extend(['foo'])
op2.output.extend(['foo'])
op2.output.extend(['foo_1'])
shapes = {'foo': [1], 'foo_1': [2]}
track_blob_names = tb._get_blob_names([op1, op2])
tb._convert_to_ssa(shapes, track_blob_names, [op1, op2])
self.assertEqual(op1.output[0], 'foo')
self.assertEqual(op2.input[0], 'foo')
self.assertEqual(op2.output[0], 'foo_1')
        # Unfortunate name, but we do not parse the original `_` suffix for now.
self.assertEqual(op2.output[1], 'foo_1_1')
self.assertEqual(len(shapes), 3)
self.assertEqual(shapes['foo'], [1])
self.assertEqual(shapes['foo_1'], [1])
self.assertEqual(shapes['foo_1_1'], [2])
self.assertEqual(len(track_blob_names), 3)
self.assertEqual(track_blob_names['foo'], 'foo')
self.assertEqual(track_blob_names['foo_1'], 'foo')
self.assertEqual(track_blob_names['foo_1_1'], 'foo_1')
def test_simple_cnnmodel(self):
model = cnn.CNNModelHelper("NCHW", name="overfeat")
data, label = model.ImageInput(["db"], ["data", "label"], is_test=0)
with core.NameScope("conv1"):
conv1 = model.Conv(data, "conv1", 3, 96, 11, stride=4)
relu1 = model.Relu(conv1, conv1)
pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
with core.NameScope("classifier"):
fc = model.FC(pool1, "fc", 4096, 1000)
pred = model.Softmax(fc, "pred")
xent = model.LabelCrossEntropy([pred, label], "xent")
loss = model.AveragedLoss(xent, "loss")
model.net.RunAllOnGPU()
model.param_init_net.RunAllOnGPU()
model.AddGradientOperators([loss], skip=1)
track_blob_names = {}
graph = tb.cnn_to_graph_def(
model,
track_blob_names=track_blob_names,
shapes={},
)
self.assertEqual(
track_blob_names['GRADIENTS/conv1/conv1_b_grad'],
'conv1/conv1_b_grad',
)
self.maxDiff = None
# We can't guarantee the order in which they appear, so we sort
# both before we compare them
sep = "node {"
expected = "\n".join(sorted(
sep + "\n " + part.strip()
for part in EXPECTED.strip().split(sep)
if part.strip()
))
actual = "\n".join(sorted(
sep + "\n " + part.strip()
for part in str(graph).strip().split(sep)
if part.strip()
))
self.assertMultiLineEqual(actual, expected)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/contrib/tensorboard/tensorboard_exporter_test.py
|
#!/usr/bin/env python3
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import sys
import yaml
import argparse
import os
from copy import deepcopy
from typing import Dict, List, Set
parser = argparse.ArgumentParser()
parser.add_argument("--template_dir", default=".", help="where template.h is")
parser.add_argument("--yaml_dir", default="aten/src/ATen/ATen",
help="where ATen yaml files are")
parser.add_argument("--output_prefix", default="", help="")
parser.add_argument(
"--install_dir", default=".", help="where to put generated file")
parser.add_argument("--aten_root", default="", help="root directory of aten")
args, _ = parser.parse_known_args()
if args.aten_root:
if not os.path.exists(args.aten_root):
raise ValueError('aten_root ({}) does not exist'.format(
args.aten_root))
sys.path.insert(0, os.path.join(args.aten_root, '..'))
from torchgen.code_template import CodeTemplate as CT
else:
from torchgen.code_template import CodeTemplate as CT
OP_TEMPLATE = CT.from_file(
os.path.join(args.template_dir, 'aten_op_template.h'))
try:
# use faster C loader if available
from yaml import CSafeLoader as Loader
except ImportError:
from yaml import SafeLoader as Loader # type: ignore[misc]
def write(filename, s):
with open(filename, "w") as f:
f.write(s)
def read(filename):
with open(filename, "r") as f:
return f.read()
def value_has_tensors(v):
    # Sparse types shouldn't appear in the public API; this seems to be a temporary bug
return "Tensor" in v['dynamic_type'] and "Sparse" not in v['dynamic_type']
def value_is_tensor_type(v):
return value_has_tensors(v) and v['dynamic_type'] not in ['at::TensorList', 'const c10::List<c10::optional<at::Tensor>> &']
# for each aten type, how do we handle a return value of that type?
RETURN_MAP = {
'at::Tensor': 'assignTo(Output(${offset}),${output});',
'at::Scalar': 'assignTo(Output(${offset}),${output}.type(), ${output});',
'bool': 'assignToValue<int64_t>(Output(${offset}),${output});',
'int64_t': 'assignToValue<int64_t>(Output(${offset}),${output});',
'::std::vector<at::Tensor>': 'assignListStartingAt(${offset}, ${output});',
}
# for each non-Tensor aten argument, how do we read it from caffe2's
# attribute list. Most of these call runtime functions defined in the
# template class.
ARGUMENT_MAP = {
'const at::Scalar &': 'at::Scalar ${arg} = readScalarAttribute("${arg}");',
'bool': 'bool ${arg} = readAttribute<int64_t>("${arg}");',
'int': 'int ${arg} = readAttribute<int64_t>("${arg}");',
'double': 'double ${arg} = readAttribute<float>("${arg}");',
'int64_t': 'int64_t ${arg} = readAttribute<int64_t>("${arg}");',
'at::IntArrayRef': 'auto ${arg} = readIntArrayRef("${arg}");',
'::std::array<bool,2>': 'auto ${arg} = readBoolMask<2>("${arg}");',
'::std::array<bool,3>': 'auto ${arg} = readBoolMask<3>("${arg}");',
}
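# Illustration of how the argument templates above are expanded (the argument
# name 'keepdim' is only an example):
#   CT('bool ${arg} = readAttribute<int64_t>("${arg}");').substitute({}, arg='keepdim')
#   # -> 'bool keepdim = readAttribute<int64_t>("keepdim");'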
# for BC reasons we want to route some of the functions to different
# implementations
SPECIAL_IMPLEMENTATIONS = {
'index': 'internal::index_with_uint8_handling',
}
def expand(o):
num_defaults = sum(1 if 'default' in arg else 0 for arg in o['arguments'])
results = [o]
for i in range(0, num_defaults):
# last num_default values should be default
assert('default' in o['arguments'][-(i + 1)])
v = deepcopy(o)
v['arguments'] = v['arguments'][:-(i + 1)]
results.append(v)
return results
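# e.g. (assumed schema) an op whose last two arguments have defaults expands into
# three variants -- all arguments, all but the last, and all but the last two --
# so each callable arity gets its own descriptor below.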
# filter the list of declarations removing things we cannot support
def supports(o, factory_methods):
# Ignore all families (!) of functions that have TensorOptions (i.e. tensor factory methods).
if o['name'] in factory_methods:
if factory_methods[o['name']] == 0:
print("Skipping {} because it is a factory method".format(o['name']))
factory_methods[o['name']] += 1
return False
# skip all in-place operators for now since aten cannot Resize
# caffe2 memory inside an operator
if o['inplace']:
return False
# _out variants also work in-place on arguments taken as destinations
# we also cannot handle these because aten cannot resize caffe2 Tensors
if "_out" in o['name']:
return False
    # skip ops with no return value (previously such ops were declared as 'void')
if len(o['returns']) == 0:
return False
# skip return types we cannot handle
for ret in o['returns']:
if not value_has_tensors(ret) and ret['type'] not in RETURN_MAP:
print("Skipping {} Because of Ret: {} ({})".format(
o['name'], ret['type'], ret['dynamic_type']))
return False
# skip arguments we cannot handle
for arg in o['arguments']:
if not value_has_tensors(arg) and arg['type'] not in ARGUMENT_MAP:
print("Skipping {} Because of Arg: {} ({}) ".format(
o['name'], arg['type'], arg['dynamic_type']))
return False
return True
# template for each potential operator.
# each operator has an integer 'key' associated with it, and
# a lambda that defines the operator
# non-tensor attributes are created in ${initialization}
# and then saved as arguments to the lambda
# Inputs/Outputs are read inside the lambda
#
# each implementation is defined in a separate method annotated with
# C10_NOINLINE to avoid inlining into the ATenOp constructor, which would
# trigger pathological compile times.
IMPLEMENTATION_TEMPLATE = CT("""\
C10_NOINLINE void implementation_${key}() { // ${name}
${initialization}
run_op = [=] {
at::AutoDispatchBelowAutograd guard;
${statements}
auto the_result = ${invocation};
${assignments}
return true;
};
}
""")
CASE_TEMPLATE = CT("""\
case ${key}: // ${name}
implementation_${key}();
break;
""")
ASSIGN_CHECK_SIZE_TEMPLATE = CT("""\
if(OutputSize() > ${offset}) {${assignment}}
""")
def get_output(o, i):
if len(o['returns']) == 1:
return 'the_result'
else:
return '::std::get<{}>(the_result)'.format(i)
def attribute_names(o):
return sorted([a['name'] for a in o['arguments'] if not value_has_tensors(a)])
def required_attribute_names(o):
return sorted([a['name'] for a in o['arguments'] if not value_has_tensors(a) and 'default' not in a])
def self_as_first_argument(arguments):
return ([a for a in arguments if a['name'] == 'self'] +
[a for a in arguments if a['name'] != 'self'])
def get_num_inputs(o):
args = 0
for a in o['arguments']:
if a['type'] in ['at::TensorList', 'const c10::List<c10::optional<at::Tensor>> &']:
return '*'
elif value_has_tensors(a):
args += 1
return str(args)
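# e.g. (assumed schemas): add(Tensor self, Tensor other, Scalar alpha) gives "2",
# while cat(TensorList tensors, int dim) gives "*" because the input count is
# only known at runtime.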
def find_factory_methods(decls):
factory_methods = {}
for o in decls:
if any(arg['dynamic_type'] == 'at::TensorOptions' for arg in o['arguments']):
factory_methods[o['name']] = 0
return factory_methods
def emit_assignments(o, env):
for i, r in enumerate(o['returns']):
t = RETURN_MAP[r['type'] if not value_is_tensor_type(r) else 'at::Tensor']
assignment = CT(t).substitute(env, offset=i, output=get_output(o, i))
check_size_assignment = ASSIGN_CHECK_SIZE_TEMPLATE.substitute(env, offset=i, assignment=assignment)
env['assignments'].append(check_size_assignment)
if __name__ == '__main__':
decls = yaml.load(read(os.path.join(args.yaml_dir, 'Declarations.yaml')), Loader=Loader)
factory_methods = find_factory_methods(decls)
filtered = [expanded for o in decls for expanded in expand(o) if supports(expanded, factory_methods)]
top_env: Dict[str, List] = {
'mappings': [],
'implementations': [],
'cases': [],
}
seen: Set[str] = set()
key = 0
for o in filtered:
# [DESCRIPTORS]
# each option is associated with a descriptor string that is used
# to figure out which version of an op is being used:
# The format is:
# opname-num_inputs-attribute_1-attribute2
# Example:
# lerp-2-weight
# the operator lerp takes 2 arguments and has the attribute weight
attr_names = attribute_names(o)
num_inputs = get_num_inputs(o)
descriptor = '-'.join([o['name']] + attr_names + [num_inputs])
if descriptor in seen:
continue
seen.add(descriptor)
# map from descriptor string to the integer key in the switch statements
# that initializes the operators
top_env['mappings'].append('{{ "{}", {} }},'.format(descriptor, key))
env = {
'name': o['name'],
'statements': [],
'arguments': [],
'assignments': [],
'initialization': [],
'key': str(key),
}
if 'namespace' not in o['method_of'] and 'Tensor' not in o['method_of']:
# methods on type like 'ones' or 'zeros' always take a
# string attribute that is translated into the at::Type object
# e.g. "Float" is at::kFloat
assert('Type' in o['method_of'])
static_tensor_inputs = sum(arg['type'] not in ['at::TensorList', 'const c10::List<c10::optional<at::Tensor>> &'] and value_is_tensor_type(arg) for arg in o['arguments'])
has_tensorlist = any(arg['type'] in ['at::TensorList', 'const c10::List<c10::optional<at::Tensor>> &'] for arg in o['arguments'])
if has_tensorlist:
tensorlist_idx = [i for i, arg in enumerate(o['arguments']) if arg['type'] in ['at::TensorList', 'const c10::List<c10::optional<at::Tensor>> &']][0]
real_inputs = 0
for i, arg in enumerate(o['arguments']):
env['arguments'].append(arg['name'])
# Pretend the flat argument list is a stack where the end is the top.
view_length = 'InputSize()' if has_tensorlist and i < tensorlist_idx else static_tensor_inputs
if arg['type'] == 'at::TensorList':
# NOTE: do not advance real_inputs here. After this we will
# switch to indexing the "stack" from the end
env['statements'].append(
'auto {} = peekSlice({}, InputSize() - {}, InputSize());'
.format(arg['name'], real_inputs, static_tensor_inputs))
elif arg['type'] == 'const c10::List<c10::optional<at::Tensor>> &':
# NOTE: do not advance real_inputs here. After this we will
# switch to indexing the "stack" from the end
env['statements'].append(
'auto {} = peekSliceOptionals({}, InputSize() - {}, InputSize());'
.format(arg['name'], real_inputs, static_tensor_inputs))
elif value_is_tensor_type(arg):
# load tensor inputs from Caffe2
env['statements'].append(
'auto {} = peek({}, {});'.format(arg['name'], real_inputs, view_length))
real_inputs += 1
else:
init = CT(ARGUMENT_MAP[arg['type']]).substitute(env, arg=arg['name'])
env['initialization'].append(init)
emit_assignments(o, env)
if o['name'] in SPECIAL_IMPLEMENTATIONS:
env['invocation'] = "{}({})".format(SPECIAL_IMPLEMENTATIONS[o['name']], ','.join(env['arguments']))
elif 'namespace' in o['method_of']:
env['invocation'] = CT("at::${name}(${arguments})").substitute(env)
else:
assert('Tensor' in o['method_of'])
env['invocation'] = "self.{}({})".format(
o['name'], ', '.join(env['arguments'][1:]))
top_env['implementations'].append(IMPLEMENTATION_TEMPLATE.substitute(env))
top_env['cases'].append(CASE_TEMPLATE.substitute(env))
key += 1
write(os.path.join(args.install_dir, args.output_prefix + "aten_op.h"), OP_TEMPLATE.substitute(top_env))
|
pytorch-master
|
caffe2/contrib/aten/gen_op.py
|
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestATen(hu.HypothesisTestCase):
@given(inputs=hu.tensors(n=2), **hu.gcs)
def test_add(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["X", "Y"],
["Z"],
operator="add")
def ref(X, Y):
return [X + Y]
self.assertReferenceChecks(gc, op, inputs, ref)
@given(inputs=hu.tensors(n=2, dtype=np.float16), **hu.gcs_gpu_only)
def test_add_half(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["X", "Y"],
["Z"],
operator="add")
def ref(X, Y):
return [X + Y]
self.assertReferenceChecks(gc, op, inputs, ref)
@given(inputs=hu.tensors(n=1), **hu.gcs)
def test_pow(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["S"],
["Z"],
operator="pow", exponent=2.0)
def ref(X):
return [np.square(X)]
self.assertReferenceChecks(gc, op, inputs, ref)
@given(x=st.integers(min_value=2, max_value=8), **hu.gcs)
def test_sort(self, x, gc, dc):
inputs = [np.random.permutation(x)]
op = core.CreateOperator(
"ATen",
["S"],
["Z", "I"],
operator="sort")
def ref(X):
return [np.sort(X), np.argsort(X)]
self.assertReferenceChecks(gc, op, inputs, ref)
@given(inputs=hu.tensors(n=1), **hu.gcs)
def test_sum(self, inputs, gc, dc):
op = core.CreateOperator(
"ATen",
["S"],
["Z"],
operator="sum")
def ref(X):
return [np.sum(X)]
self.assertReferenceChecks(gc, op, inputs, ref)
@given(**hu.gcs)
def test_index_uint8(self, gc, dc):
# Indexing with uint8 is deprecated, but we need to provide backward compatibility for some old models exported through ONNX
op = core.CreateOperator(
"ATen",
['self', 'mask'],
["Z"],
operator="index")
def ref(self, mask):
return (self[mask.astype(np.bool_)],)
tensor = np.random.randn(2, 3, 4).astype(np.float32)
mask = np.array([[1, 0, 0], [1, 1, 0]]).astype(np.uint8)
self.assertReferenceChecks(gc, op, [tensor, mask], ref)
@given(**hu.gcs)
def test_index_put(self, gc, dc):
op = core.CreateOperator(
"ATen",
['self', 'indices', 'values'],
["Z"],
operator="index_put")
def ref(self, indices, values):
self[indices] = values
return (self,)
tensor = np.random.randn(3, 3).astype(np.float32)
mask = np.array([[True, True, True], [True, False, False], [True, True, False]])
values = np.random.randn(6).astype(np.float32)
self.assertReferenceChecks(gc, op, [tensor, mask, values], ref)
@given(**hu.gcs)
def test_unique(self, gc, dc):
op = core.CreateOperator(
"ATen",
['self'],
["output"],
sorted=True,
return_inverse=True,
# return_counts=False,
operator="_unique")
def ref(self):
            unique_values, _ = np.unique(self, return_index=False, return_inverse=True, return_counts=False)
            return (unique_values,)
tensor = np.array([1, 2, 6, 4, 2, 3, 2])
print(ref(tensor))
self.assertReferenceChecks(gc, op, [tensor], ref)
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/contrib/aten/aten_test.py
|
pytorch-master
|
caffe2/contrib/aten/__init__.py
|
|
pytorch-master
|
caffe2/contrib/aten/docs/__init__.py
|
|
import tempfile
import numpy as np
from torch import nn
from torch.autograd import Variable, Function
import torch.onnx
import onnx
import caffe2.python.onnx.backend
class MyFunction(Function):
@staticmethod
def forward(ctx, x, y):
return x * x + y
@staticmethod
def symbolic(graph, x, y):
x2 = graph.at("mul", x, x)
r = graph.at("add", x2, y)
# x, y, x2, and r are 'Node' objects
# print(r) or print(graph) will print out a textual representation for debugging.
# this representation will be converted to ONNX protobufs on export.
return r
class MyModule(nn.Module):
def forward(self, x, y):
# you can combine your ATen ops with standard onnx ones
x = nn.ReLU()(x)
return MyFunction.apply(x, y)
f = tempfile.NamedTemporaryFile()
torch.onnx.export(MyModule(),
(Variable(torch.ones(3, 4)), Variable(torch.ones(3, 4))),
f, verbose=True)
# prints the graph for debugging:
# graph(%input : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu),
# %y : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
# %2 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu) = onnx::Relu(%input)
# %3 : Tensor = aten::ATen[operator="mul"](%2, %2)
# %4 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu) = aten::ATen[operator="add"](%3, %y)
# return (%4)
graph = onnx.load(f.name)
a = np.random.randn(3, 4).astype(np.float32)
b = np.random.randn(3, 4).astype(np.float32)
prepared_backend = caffe2.python.onnx.backend.prepare(graph)
W = {graph.graph.input[0].name: a, graph.graph.input[1].name: b}
c2_out = prepared_backend.run(W)[0]
x = np.maximum(a, 0)
r = x * x + b
np.testing.assert_array_almost_equal(r, c2_out)
|
pytorch-master
|
caffe2/contrib/aten/docs/sample.py
|
pytorch-master
|
caffe2/quantization/__init__.py
|
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPReluOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
def test_dnnlowp_relu(self, size, is_empty, gc, dc):
if is_empty:
size = 0
min_ = -10.0
max_ = 10.0
scale = (max_ - min_) / 255
zero_point = int(np.round(-min_ / scale))
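        # Worked example from the constants above: scale == 20 / 255 ~= 0.0784 and
        # zero_point == 128, so the float value 0.0 maps to the quantized value 128.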
X = (np.random.rand(size) * (max_ - min_) + min_).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [("Relu", ""), ("Relu", "DNNLOWP"), ("Int8Relu", "DNNLOWP")]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if engine == "DNNLOWP":
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
engine=engine,
device_option=gc,
Y_scale=scale,
Y_zero_point=zero_point,
)
net.Proto().op.extend([quantize])
relu = core.CreateOperator(
op_type,
["X_q" if engine == "DNNLOWP" else "X"],
["Y_q" if engine == "DNNLOWP" else "Y"],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([relu])
if engine == "DNNLOWP":
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
# Y = max(0, X) so the only error is quantization of inputs
check_quantized_results_close(outputs, ref=X)
|
pytorch-master
|
caffe2/quantization/server/relu_dnnlowp_op_test.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given, settings
class TestComputeEqualizationScaleOp(hu.HypothesisTestCase):
@settings(max_examples=10)
@given(
m=st.integers(1, 50),
n=st.integers(1, 50),
k=st.integers(1, 50),
rnd_seed=st.integers(1, 5),
**hu.gcs_cpu_only
)
def test_compute_equalization_scale(self, m, n, k, rnd_seed, gc, dc):
np.random.seed(rnd_seed)
W = np.random.rand(n, k).astype(np.float32) - 0.5
X = np.random.rand(m, k).astype(np.float32) - 0.5
def ref_compute_equalization_scale(X, W):
S = np.ones([X.shape[1]])
for j in range(W.shape[1]):
WcolMax = np.absolute(W[:, j]).max()
XcolMax = np.absolute(X[:, j]).max()
if WcolMax and XcolMax:
S[j] = np.sqrt(WcolMax / XcolMax)
return S
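        # e.g. if column j of W has max |value| 4.0 and column j of X has max
        # |value| 1.0, the reference scale for that column is sqrt(4.0) == 2.0.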
net = core.Net("test")
ComputeEqualizationScaleOp = core.CreateOperator(
"ComputeEqualizationScale", ["X", "W"], ["S"]
)
net.Proto().op.extend([ComputeEqualizationScaleOp])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.create_blob("W").feed(W, device_option=gc)
self.ws.run(net)
S = self.ws.blobs["S"].fetch()
S_ref = ref_compute_equalization_scale(X, W)
np.testing.assert_allclose(S, S_ref, atol=1e-3, rtol=1e-3)
def test_compute_equalization_scale_shape_inference(self):
X = np.array([[1, 2], [2, 4], [6, 7]]).astype(np.float32)
W = np.array([[2, 3], [5, 4], [8, 2]]).astype(np.float32)
ComputeEqualizationScaleOp = core.CreateOperator(
"ComputeEqualizationScale", ["X", "W"], ["S"]
)
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
net = core.Net("test_shape_inference")
net.Proto().op.extend([ComputeEqualizationScaleOp])
shapes, types = workspace.InferShapesAndTypes(
[net],
blob_dimensions={"X": X.shape, "W": W.shape},
blob_types={"X": core.DataType.FLOAT, "W": core.DataType.FLOAT},
)
assert (
"S" in shapes and "S" in types
), "Failed to infer the shape or type of output"
self.assertEqual(shapes["S"], [1, 2])
self.assertEqual(types["S"], core.DataType.FLOAT)
|
pytorch-master
|
caffe2/quantization/server/compute_equalization_scale_test.py
|
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPBatchPermutationOpTest(hu.HypothesisTestCase):
@given(N=st.integers(min_value=1, max_value=100), **hu.gcs_cpu_only)
@settings(max_examples=10, deadline=None)
def test_batch_permutation(self, N, gc, dc):
X = np.round(np.random.rand(N, 10, 20, 3) * 255).astype(np.float32)
indices = np.arange(N).astype(np.int32)
np.random.shuffle(indices)
quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
batch_perm = core.CreateOperator(
"BatchPermutation", ["X_q", "indices"], ["Y_q"], engine="DNNLOWP"
)
net = core.Net("test_net")
net.Proto().op.extend([quantize, batch_perm])
workspace.FeedBlob("X", X)
workspace.FeedBlob("indices", indices)
workspace.RunNetOnce(net)
X_q = workspace.FetchInt8Blob("X_q").data
Y_q = workspace.FetchInt8Blob("Y_q").data
def batch_permutation_ref(X, indices):
return np.array([X[i] for i in indices])
Y_q_ref = batch_permutation_ref(X_q, indices)
np.testing.assert_allclose(Y_q, Y_q_ref)
|
pytorch-master
|
caffe2/quantization/server/batch_permutation_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPAddOpTest(hu.HypothesisTestCase):
@given(
N=st.integers(32, 256),
is_empty=st.booleans(),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
in_place=st.sampled_from([(False, False), (True, False), (False, True)]),
**hu.gcs_cpu_only
)
def test_dnnlowp_elementwise_add_int(
self, N, is_empty, in_quantized, out_quantized, in_place, gc, dc
):
if is_empty:
N = 0
# FIXME: DNNLOWP Add doesn't support inplace operation and
# dequantize_output=1 at the same time
if in_place[0] or in_place[1]:
in_quantized = True
out_quantized = True
# A has scale 1, so exactly represented after quantization
min_ = -100
max_ = min_ + 255
A = np.round(np.random.rand(N) * (max_ - min_) + min_)
A = A.astype(np.float32)
if N != 0:
A[0] = min_
A[1] = max_
# B has scale 1/2, so exactly represented after quantization
B = np.round(np.random.rand(N) * 255 / 2 - 64).astype(np.float32)
if N != 0:
B[0] = -64
B[1] = 127.0 / 2
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [("Add", ""), ("Add", "DNNLOWP"), ("Int8Add", "DNNLOWP")]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
if do_quantize:
quantize_A = core.CreateOperator(
"Quantize", ["A"], ["A_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_A])
quantize_B = core.CreateOperator(
"Quantize", ["B"], ["B_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_B])
out = "Y"
if in_place[0]:
out = "A"
elif in_place[1]:
out = "B"
add = core.CreateOperator(
op_type,
["A_q", "B_q"] if do_quantize else ["A", "B"],
[(out + "_q") if do_dequantize else out],
dequantize_output=not do_dequantize,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([add])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", [out + "_q"], [out], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("A").feed(A, device_option=gc)
self.ws.create_blob("B").feed(B, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs[out].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
@given(**hu.gcs_cpu_only)
def test_dnnlowp_elementwise_add_broadcast(self, gc, dc):
# Set broadcast and no axis, i.e. broadcasting last dimensions.
min_ = -100
max_ = min_ + 255
A = np.round(np.random.rand(2, 3, 4, 5) * (max_ - min_) + min_)
A = A.astype(np.float32)
A[0, 0, 0, 0] = min_
A[0, 0, 0, 1] = max_
B = np.round(np.random.rand(4, 5) * 255 / 2 - 64).astype(np.float32)
B[0, 0] = -64
B[0, 1] = 127.0 / 2
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [("Add", ""), ("Add", "DNNLOWP"), ("Int8Add", "DNNLOWP")]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
add = core.CreateOperator(
op_type,
["A", "B"],
["Y"],
engine=engine,
device_option=gc,
broadcast=1,
dequantize_output=1,
)
net.Proto().op.extend([add])
self.ws.create_blob("A").feed(A, device_option=gc)
self.ws.create_blob("B").feed(B, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
@given(**hu.gcs_cpu_only)
def test_dnnlowp_elementwise_add_broadcast_axis(self, gc, dc):
for bdim, axis in [
((3, 4), 1), # broadcasting intermediate dimensions
((2,), 0), # broadcasting the first dimension
            ((1, 4, 1), 1),  # broadcasting with single elem dimensions at both ends
        ]:
min_ = -100
max_ = min_ + 255
A = np.round(np.random.rand(2, 3, 4, 5) * (max_ - min_) + min_)
A = A.astype(np.float32)
B = np.round(np.random.rand(*bdim) * 255 / 2 - 64).astype(np.float32)
A.flat[0] = min_
A.flat[1] = max_
B.flat[0] = -64
B.flat[1] = 127.0 / 2
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [("Add", ""), ("Add", "DNNLOWP"), ("Int8Add", "DNNLOWP")]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
add = core.CreateOperator(
op_type,
["A", "B"],
["Y"],
engine=engine,
device_option=gc,
broadcast=1,
axis=axis,
dequantize_output=1,
)
net.Proto().op.extend([add])
self.ws.create_blob("A").feed(A, device_option=gc)
self.ws.create_blob("B").feed(B, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
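

# Editor's sketch of the "scale 1" / "scale 1/2" comments in the tests above
# (an assumption about the usual asymmetric 8-bit scheme, not a quote of the
# DNNLOWP implementation): a tensor spanning [min_, min_ + 255] gets
# scale = 255 / 255 = 1 and a tensor spanning [-64, 63.5] gets
# scale = 127.5 / 255 = 0.5, so integers (A) and multiples of 0.5 (B) are
# representable without rounding error and any disagreement between engines is
# attributable to the Add computation itself.
def _is_exactly_representable(x, scale, zero_point):
    import numpy as np  # local import so this sketch stands alone

    q = np.round(x / scale) + zero_point
    return np.array_equal((q - zero_point) * scale, np.asarray(x, dtype=np.float64))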
|
pytorch-master
|
caffe2/quantization/server/elementwise_add_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
avoid_vpmaddubsw_overflow_fc,
check_quantized_results_close,
run_conv_or_fc,
)
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPFullyConnectedOpTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
input_channels=st.sampled_from([3, 4, 5, 8, 16, 32]),
output_channels=st.integers(2, 16),
batch_size=st.integers(0, 16),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
weight_quantized=st.booleans(),
prepack_weight=st.booleans(),
preserve_activation_sparsity=st.booleans(),
preserve_weight_sparsity=st.booleans(),
fuse_relu=st.booleans(),
output_packed_bias=st.booleans(),
use_input_qparam=st.booleans(),
use_output_qparam=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_fully_connected_int(
self,
input_channels,
output_channels,
batch_size,
in_quantized,
out_quantized,
weight_quantized,
prepack_weight,
preserve_activation_sparsity,
preserve_weight_sparsity,
fuse_relu,
output_packed_bias,
use_input_qparam,
use_output_qparam,
gc,
dc,
):
# X and W have scale 1, so exactly represented after quantization
X_min = 0 if preserve_activation_sparsity else -77
X_max = X_min + 255
X = np.round(
np.random.rand(batch_size, input_channels) * (X_max - X_min) + X_min
)
X = X.astype(np.float32)
# input channels 0 and 1 are all X_min to avoid overflow from vpmaddubsw
# when multiplied with W_min and W_max
X[:, 0] = X_min
if batch_size != 0:
X[0, 1] = X_max
if preserve_weight_sparsity:
W_min = -128
W_max = 100
else:
W_min = -100
W_max = W_min + 255
W = np.round(
np.random.rand(output_channels, input_channels) * (W_max - W_min) + W_min
)
W = W.astype(np.float32)
W[0, 0] = W_min
W[1, 0] = W_max
# Make sure we won't have overflows from vpmaddubsw instruction used in
# fbgemm
avoid_vpmaddubsw_overflow_fc(
batch_size,
input_channels,
output_channels,
X,
X_min,
X_max,
W,
W_min,
W_max,
)
b = np.random.randn(output_channels).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [("FC", "", False, False)]
if fuse_relu:
op_engine_list += [("Int8FCRelu", "DNNLOWP", False, False)]
else:
op_engine_list += [
# type, engine, do_fuse, skip_requantization
("FC", "DNNLOWP", False, False),
("FC", "DNNLOWP_16", False, False),
("Int8FC", "DNNLOWP", False, False),
("Int8FC", "DNNLOWP", True, False),
("Int8FC", "DNNLOWP", False, True),
("Int8FC", "DNNLOWP", True, True),
]
for op_type, engine, do_fuse, skip_requantization in op_engine_list:
init_net = core.Net("test_init_net")
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized and not do_fuse
do_dequantize = "DNNLOWP" in engine and out_quantized and not skip_requantization
do_quantize_weight = (
engine == "DNNLOWP" and weight_quantized and len(outputs) > 0
)
do_prepack_weight = engine == "DNNLOWP" and prepack_weight
if do_quantize:
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
preserve_activation_sparsity=preserve_activation_sparsity,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([quantize])
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = dnnlowp_utils.choose_quantization_params(
X_min, X_max, preserve_activation_sparsity
)
w_q_param = None
if do_quantize_weight:
(
int8_given_tensor_fill,
w_q_param,
) = dnnlowp_utils.create_int8_given_tensor_fill(
W, "W_q", preserve_weight_sparsity
)
init_net.Proto().op.extend([int8_given_tensor_fill])
# Bias
int8_bias_tensor_fill = dnnlowp_utils.create_int8_bias_tensor_fill(
b, "b_q", x_q_param, w_q_param
)
init_net.Proto().op.extend([int8_bias_tensor_fill])
if do_prepack_weight:
inputs = ["W_q" if do_quantize_weight else "W"]
if do_dequantize:
inputs += ["b_q" if do_quantize_weight else "b"]
pack = core.CreateOperator(
"Int8FCPackWeight",
inputs,
["W_packed", "B_q32"]
if do_dequantize and output_packed_bias
else ["W_packed"],
preserve_weight_sparsity=preserve_weight_sparsity,
in_scale=x_q_param.scale,
engine=engine,
)
init_net.Proto().op.extend([pack])
fc = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"W_packed"
if do_prepack_weight
else ("W_q" if do_quantize_weight else "W"),
"b_q" if do_quantize_weight else "b",
# "quant_param",
],
["Y_q" if do_dequantize else "Y"],
dequantize_output=not do_dequantize,
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
device_option=gc,
)
if op_type != "FC":
if (do_dequantize and use_output_qparam) or (use_input_qparam and op_type == "Int8_FC"):
fc.input.extend(["quant_param"])
if (use_input_qparam and op_type == "Int8_FC"):
fc.input.extend(["X_quant_param"])
if do_quantize_weight or do_prepack_weight:
# When quantized weight is provided, we can't rescale the
# output dynamically by looking at the range of output of each
# batch, so here we provide the range of output observed from
# fp32 reference implementation
dnnlowp_utils.add_quantization_param_args(
fc, outputs[0][0], preserve_activation_sparsity
)
net.Proto().op.extend([fc])
if fuse_relu and "DNNLOWP" not in engine:
net.Relu(["Y"], "Y")
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
if use_output_qparam and do_dequantize and op_type != "FC":
ref_output = outputs[0][0]
ref_output_min = 0 if ref_output.size == 0 else ref_output.min()
ref_output_max = 0 if ref_output.size == 0 else ref_output.max()
q_param = dnnlowp_utils.choose_quantization_params(
ref_output_min, ref_output_max, preserve_activation_sparsity
)
q_param_scale = q_param.scale
q_param_zero_point = q_param.zero_point
else:
q_param_scale = None
q_param_zero_point = None
if not (use_input_qparam and op_type == "Int8FC"):
x_q_param_scale = None
x_q_param_zero_point = None
else:
x_q_param_scale = x_q_param.scale
x_q_param_zero_point = x_q_param.zero_point
run_conv_or_fc(
self,
init_net,
net,
X,
W,
b,
op_type,
engine,
None,
gc,
outputs,
q_param_scale,
q_param_zero_point,
x_q_param_scale,
x_q_param_zero_point,
)
if output_packed_bias and do_prepack_weight and do_dequantize:
bias_int32 = self.ws.blobs["B_q32"].fetch()
if do_quantize_weight:
np.testing.assert_equal(
bias_int32[0], np.round(b / (x_q_param.scale * w_q_param.scale))
)
np.testing.assert_equal(bias_int32[0].dtype, np.int32)
shapes, types = workspace.InferShapesAndTypes(
[init_net, net],
blob_dimensions={
"X": [batch_size, input_channels],
"W": [output_channels, input_channels],
"b": [output_channels],
"quant_param": [1],
"X_quant_param": [1],
},
blob_types={
"X": core.DataType.FLOAT,
"W": core.DataType.FLOAT,
"b": core.DataType.FLOAT,
"quant_param": core.DataType.FLOAT,
"X_quant_param": core.DataType.FLOAT,
},
)
assert (
"Y" in shapes and "Y" in types
), "Failed to infer the shape or type of Y"
self.assertEqual(shapes["Y"], [batch_size, output_channels])
self.assertEqual(types["Y"], core.DataType.FLOAT)
check_quantized_results_close(outputs, symmetric=preserve_activation_sparsity)
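

# Editor's note on avoid_vpmaddubsw_overflow_fc (generic x86 background, not a
# description of the helper's algorithm): fbgemm's int8 kernels use vpmaddubsw,
# which multiplies pairs of uint8 activations with int8 weights and sums each
# adjacent pair into a signed 16-bit lane.  Worst case
# 255 * 127 + 255 * 127 = 64770 > 32767, so unconstrained random X/W could
# saturate; the helper perturbs the inputs so every adjacent-pair sum fits.
def _vpmaddubsw_pair_sum(x_u8_pair, w_s8_pair):
    # Returns the exact pair sum that would have to fit into int16.
    import numpy as np  # local import so this sketch stands alone

    a = np.asarray(x_u8_pair, dtype=np.int32)
    b = np.asarray(w_s8_pair, dtype=np.int32)
    return int(a[0] * b[0] + a[1] * b[1])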
|
pytorch-master
|
caffe2/quantization/server/fully_connected_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import assume, given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpPoolTest(hu.HypothesisTestCase):
@given(
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
size=st.integers(1, 20),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
in_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_max_pool(
self,
stride,
pad,
kernel,
size,
input_channels,
batch_size,
order,
in_quantized,
gc,
dc,
):
assume(kernel <= size)
assume(pad < kernel)
C = input_channels
N = batch_size
H = W = size
min_ = -10
max_ = 20
if order == "NCHW":
X = np.round(np.random.rand(N, C, H, W) * (max_ - min_) + min_)
elif order == "NHWC":
X = np.round(np.random.rand(N, H, W, C) * (max_ - min_) + min_)
X = X.astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("MaxPool", ""),
("MaxPool", "DNNLOWP"),
("Int8MaxPool", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
max_pool = core.CreateOperator(
op_type,
["X_q" if do_quantize else "X"],
["Y_q" if engine == "DNNLOWP" else "Y"],
stride=stride,
kernel=kernel,
pad=pad,
order=order,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([max_pool])
if engine == "DNNLOWP":
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
# Y_i = max(X_j) so the only error is in quantization of inputs
check_quantized_results_close(outputs, ref=X)
@given(
ndim=st.integers(2, 3),
stride=st.integers(1, 1),
pad=st.integers(0, 0),
kernel=st.integers(1, 5),
size=st.integers(2, 2),
input_channels=st.integers(1, 1),
batch_size=st.integers(2, 2),
order=st.sampled_from(["NCHW", "NHWC"]),
in_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_average_pool(
self,
ndim,
stride,
pad,
kernel,
size,
input_channels,
batch_size,
order,
in_quantized,
gc,
dc,
):
kernel = 2 # Only kernel size 2 is supported
assume(kernel <= size)
assume(pad < kernel)
C = input_channels
N = batch_size
strides = (stride,) * ndim
pads = (pad,) * (ndim * 2)
kernels = (kernel,) * ndim
sizes = (size,) * ndim
# X has scale 1, so no input quantization error
min_ = -100
max_ = min_ + 255
if order == "NCHW":
X = np.round(np.random.rand(*((N, C) + sizes)) * (max_ - min_) + min_)
X = X.astype(np.float32)
X[(0,) * (ndim + 2)] = min_
X[(0,) * (ndim + 1) + (1,)] = max_
elif order == "NHWC":
X = np.round(np.random.rand(*((N,) + sizes + (C,))) * (max_ - min_) + min_)
X = X.astype(np.float32)
X[(0,) * (ndim + 2)] = min_
X[(0, 1) + (0,) * ndim] = max_
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("AveragePool", ""),
("AveragePool", "DNNLOWP"),
("Int8AveragePool", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
            avg_pool = core.CreateOperator(
op_type,
["X_q" if do_quantize else "X"],
["Y_q" if engine == "DNNLOWP" else "Y"],
strides=strides,
kernels=kernels,
pads=pads,
order=order,
engine=engine,
device_option=gc,
)
            net.Proto().op.extend([avg_pool])
if engine == "DNNLOWP":
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
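

# Editor's sketch of the "Y_i = max(X_j)" remark in the max-pool test above:
# for a positive scale, q(x) = round(x / scale) + zero_point is monotone
# non-decreasing, so the maximum commutes with quantization and max pooling in
# the int8 domain adds no error beyond the input quantization itself.
# (Generic argument; not a quote of the DNNLOWP kernels.)
def _max_commutes_with_quantization(x, scale=0.1, zero_point=3):
    import numpy as np  # local import so this sketch stands alone

    q = np.round(np.asarray(x) / scale) + zero_point
    return np.max(q) == np.round(np.max(x) / scale) + zero_point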
|
pytorch-master
|
caffe2/quantization/server/pool_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpGroupNormTest(hu.HypothesisTestCase):
@given(
N=st.integers(0, 4),
G=st.integers(2, 4),
K=st.integers(2, 12),
H=st.integers(4, 16),
W=st.integers(4, 16),
order=st.sampled_from(["NCHW", "NHWC"]),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
weight_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_group_norm(
self,
N,
G,
K,
H,
W,
order,
in_quantized,
out_quantized,
weight_quantized,
gc,
dc,
):
C = G * K
X = np.random.rand(N, C, H, W).astype(np.float32) * 5.0 - 1.0
if order == "NHWC":
X = utils.NCHW2NHWC(X)
gamma = np.random.rand(C).astype(np.float32) * 2.0 - 1.0
beta = np.random.randn(C).astype(np.float32) - 0.5
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("GroupNorm", ""),
("GroupNorm", "DNNLOWP"),
("Int8GroupNorm", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
do_quantize_weight = (
engine == "DNNLOWP" and weight_quantized and len(outputs) > 0
)
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
if do_quantize_weight:
int8_given_tensor_fill, gamma_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
gamma, "gamma_q"
)
net.Proto().op.extend([int8_given_tensor_fill])
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
X_q_param = dnnlowp_utils.choose_quantization_params(X_min, X_max)
int8_bias_tensor_fill = dnnlowp_utils.create_int8_bias_tensor_fill(
beta, "beta_q", X_q_param, gamma_q_param
)
net.Proto().op.extend([int8_bias_tensor_fill])
group_norm = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"gamma_q" if do_quantize_weight else "gamma",
"beta_q" if do_quantize_weight else "beta",
],
["Y_q" if do_dequantize else "Y"],
dequantize_output=0 if do_dequantize else 1,
group=G,
order=order,
is_test=True,
engine=engine,
device_option=gc,
)
if do_quantize_weight:
# When quantized weight is provided, we can't rescale the
# output dynamically by looking at the range of output of each
# batch, so here we provide the range of output observed from
# fp32 reference implementation
dnnlowp_utils.add_quantization_param_args(group_norm, outputs[0][0])
net.Proto().op.extend([group_norm])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.create_blob("gamma").feed(gamma, device_option=gc)
self.ws.create_blob("beta").feed(beta, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs, atol_scale=2.0)
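

# Editor's sketch of the fp32 GroupNorm that the quantized ops are compared
# against (the standard definition, stated here for reference; the DNNLOWP op
# additionally quantizes activations): channels are split into `group` blocks
# and each block is normalized over its channels and spatial extent before the
# per-channel gamma/beta affine.
def _group_norm_ref_sketch(x_nchw, gamma, beta, group, eps=1e-5):
    import numpy as np  # local import so this sketch stands alone

    n, c, h, w = x_nchw.shape
    xg = x_nchw.reshape(n, group, c // group, h, w)
    mean = xg.mean(axis=(2, 3, 4), keepdims=True)
    var = xg.var(axis=(2, 3, 4), keepdims=True)
    y = ((xg - mean) / np.sqrt(var + eps)).reshape(n, c, h, w)
    return y * gamma.reshape(1, c, 1, 1) + beta.reshape(1, c, 1, 1)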
|
pytorch-master
|
caffe2/quantization/server/group_norm_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPDequantizeOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
def test_dnnlowp_dequantize(self, size, is_empty, gc, dc):
if is_empty:
size = 0
min_ = -10.0
max_ = 20.0
X = (np.random.rand(size) * (max_ - min_) + min_).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_type_list = ["Dequantize", "Int8Dequantize"]
engine = "DNNLOWP"
outputs.append(Output(X, op_type="", engine=""))
for op_type in op_type_list:
net = core.Net("test_net")
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
dequantize = core.CreateOperator(
op_type, ["X_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
|
pytorch-master
|
caffe2/quantization/server/dequantize_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close,
generate_conv_inputs,
generate_convnd_inputs,
run_conv_or_fc,
)
from hypothesis import assume, given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpConvTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
stride=st.integers(1, 2),
pad=st.integers(0, 2),
kernel=st.integers(1, 5),
dilation=st.integers(1, 2),
size=st.integers(10, 16),
group=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 3, 4, 5, 8, 16, 32]),
output_channels_per_group=st.integers(2, 16),
batch_size=st.integers(0, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
weight_quantized=st.booleans(),
prepack_weight=st.booleans(),
share_col_buffer=st.booleans(),
preserve_activation_sparsity=st.booleans(),
preserve_weight_sparsity=st.booleans(),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_conv_int(
self,
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
weight_quantized,
prepack_weight,
share_col_buffer,
preserve_activation_sparsity,
preserve_weight_sparsity,
gc,
dc,
):
assume(group == 1 or dilation == 1)
assume((not prepack_weight) or order == "NHWC")
X, W, b = generate_conv_inputs(
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op_engine_list = [
("Conv", ""),
("Conv", "DNNLOWP"),
("Conv", "DNNLOWP_16"),
("Int8Conv", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
init_net = core.Net("test_init_net")
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine
do_dequantize = "DNNLOWP" in engine
# If output scale/zp aren't set, it gets computed from ref fp32 op
# in DNNLOWP, which isn't possible when we quantize input weights.
            # Make sure at least one output is collected to compute output
# scale/zp.
do_quantize_weight = (
engine == "DNNLOWP" and weight_quantized and len(outputs) > 0
)
do_prepack_weight = engine == "DNNLOWP" and prepack_weight
if do_quantize:
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
preserve_activation_sparsity=preserve_activation_sparsity,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([quantize])
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = dnnlowp_utils.choose_quantization_params(
X_min, X_max, preserve_activation_sparsity
)
if do_quantize_weight:
int8_given_tensor_fill, w_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
W, "W_q", preserve_weight_sparsity
)
init_net.Proto().op.extend([int8_given_tensor_fill])
# Bias
int8_bias_tensor_fill = dnnlowp_utils.create_int8_bias_tensor_fill(
b, "b_q", x_q_param, w_q_param
)
init_net.Proto().op.extend([int8_bias_tensor_fill])
if do_prepack_weight:
inputs = ["W_q" if do_quantize_weight else "W"]
if do_dequantize:
inputs += ["b_q" if do_quantize_weight else "b"]
pack = core.CreateOperator(
"Int8ConvPackWeight",
inputs,
["W_packed"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
in_scale=x_q_param.scale,
)
init_net.Proto().op.extend([pack])
conv = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"W_packed"
if do_prepack_weight
else ("W_q" if do_quantize_weight else "W"),
"b_q" if do_quantize_weight else "b",
],
["Y_q" if do_dequantize else "Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
shared_buffer=(1 if share_col_buffer else 0),
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
device_option=gc,
)
if do_quantize_weight or do_prepack_weight:
# When quantized weight is provided, we can't rescale the
# output dynamically by looking at the range of output of each
# batch, so here we provide the range of output observed from
# fp32 reference implementation
dnnlowp_utils.add_quantization_param_args(
conv, outputs[0][0], preserve_activation_sparsity
)
net.Proto().op.extend([conv])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, init_net, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs, symmetric=preserve_activation_sparsity)
# correctness test with no quantization error in inputs
@given(
stride=st.integers(1, 2),
pad=st.integers(0, 2),
kernel=st.integers(1, 5),
dilation=st.integers(1, 2),
size=st.integers(10, 16),
group=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 3, 4, 5, 8, 16, 32]),
output_channels_per_group=st.integers(2, 16),
batch_size=st.integers(0, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
share_col_buffer=st.booleans(),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_conv_relu_int(
self,
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
share_col_buffer,
gc,
dc,
):
assume(group == 1 or dilation == 1)
assume(order == "NHWC" or input_channels_per_group <= 8 and output_channels_per_group <= 8)
X, W, b = generate_conv_inputs(
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op_engine_list = [
("Conv", ""),
("ConvRelu", "DNNLOWP"),
("ConvRelu", "DNNLOWP_16"),
("Int8ConvRelu", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if "DNNLOWP" in engine:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
conv = core.CreateOperator(
op_type,
["X_q", "W", "b"],
["Y_q"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
shared_buffer=(1 if share_col_buffer else 0),
group=group,
device_option=gc,
)
net.Proto().op.extend([conv])
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
else:
conv = core.CreateOperator(
op_type,
["X", "W", "b"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
shared_buffer=(1 if share_col_buffer else 0),
engine=engine,
group=group,
device_option=gc,
)
net.Proto().op.extend([conv])
relu = core.CreateOperator(
"Relu", ["Y"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([relu])
run_conv_or_fc(
self, None, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs)
def _test_dnnlowp_nd_int(
self,
stride,
pad,
kernels,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
prepack_weight,
gc,
dc,
):
assume(group == 1 or dilation == 1)
assume((not prepack_weight) or order == "NHWC")
ndim = len(kernels)
X, W, b = generate_convnd_inputs(
(stride,) * ndim,
(pad,) * ndim,
kernels,
(dilation,) * ndim,
(size,) * ndim,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op_engine_list = [("Conv", ""), ("Conv", "DNNLOWP_16"), ("Int8Conv", "DNNLOWP")]
for op_type, engine in op_engine_list:
init_net = core.Net("test_init_net")
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine
do_dequantize = "DNNLOWP" in engine
# If output scale/zp aren't set, it gets computed from ref fp32 op
# in DNNLOWP, which isn't possible when we quantize input weights.
            # Make sure at least one output is collected to compute output
# scale/zp.
do_quantize_weight = engine == "DNNLOWP" and len(outputs) > 0
do_prepack_weight = engine == "DNNLOWP" and prepack_weight
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = dnnlowp_utils.choose_quantization_params(X_min, X_max)
if do_quantize_weight:
int8_given_tensor_fill, w_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
W, "W_q"
)
init_net.Proto().op.extend([int8_given_tensor_fill])
# Bias
int8_bias_tensor_fill = dnnlowp_utils.create_int8_bias_tensor_fill(
b, "b_q", x_q_param, w_q_param
)
init_net.Proto().op.extend([int8_bias_tensor_fill])
if do_prepack_weight:
inputs = ["W_q" if do_quantize_weight else "W"]
if do_dequantize:
inputs += ["b_q" if do_quantize_weight else "b"]
pack = core.CreateOperator(
"Int8ConvPackWeight",
inputs,
["W_packed"],
strides=[stride] * ndim,
kernels=kernels,
dilations=[dilation] * ndim,
pads=[pad] * (ndim * 2),
engine=engine,
group=group,
in_scale=x_q_param.scale,
)
init_net.Proto().op.extend([pack])
conv = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"W_packed"
if do_prepack_weight
else ("W_q" if do_quantize_weight else "W"),
"b_q" if do_quantize_weight else "b",
],
["Y_q" if do_dequantize else "Y"],
strides=[stride] * ndim,
kernels=kernels,
dilations=[dilation] * ndim,
pads=[pad] * (ndim * 2),
order=order,
dequantize_output=not do_dequantize,
engine=engine,
group=group,
device_option=gc,
)
if do_quantize_weight or do_prepack_weight:
# When quantized weight is provided, we can't rescale the
# output dynamically by looking at the range of output of each
# batch, so here we provide the range of output observed from
# fp32 reference implementation
dnnlowp_utils.add_quantization_param_args(conv, outputs[0][0])
net.Proto().op.extend([conv])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, init_net, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs)
@given(
stride=st.integers(1, 2),
pad=st.integers(0, 2),
temporal_kernels=st.sampled_from([1, 5]),
spatial_kernels=st.sampled_from([1, 3]),
dilation=st.integers(1, 1),
size=st.sampled_from([5, 8]),
group=st.integers(1, 2),
input_channels_per_group=st.sampled_from([2, 3]),
output_channels_per_group=st.sampled_from([2, 3]),
batch_size=st.integers(0, 2),
order=st.sampled_from(["NCHW", "NHWC"]),
prepack_weight=st.booleans(),
**hu.gcs_cpu_only
)
@settings(deadline=None, max_examples=50)
def test_dnnlowp_conv3d_int(
self,
stride,
pad,
temporal_kernels,
spatial_kernels,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
prepack_weight,
gc,
dc,
):
self._test_dnnlowp_nd_int(
stride,
pad,
(temporal_kernels,) + (spatial_kernels,) * 2,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
prepack_weight,
gc,
dc,
)
@given(
stride=st.integers(1, 2),
pad=st.integers(0, 2),
kernels=st.sampled_from([1, 3]),
dilation=st.integers(1, 1),
size=st.sampled_from([5, 8]),
group=st.integers(1, 2),
input_channels_per_group=st.sampled_from([2, 3]),
output_channels_per_group=st.sampled_from([2, 3]),
batch_size=st.integers(0, 2),
order=st.sampled_from(["NCHW", "NHWC"]),
prepack_weight=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_conv1d_int(
self,
stride,
pad,
kernels,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
prepack_weight,
gc,
dc,
):
self._test_dnnlowp_nd_int(
stride,
pad,
(kernels,),
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
prepack_weight,
gc,
dc,
)
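

# Editor's sketch of what the "output scale/zp" comments above rely on (an
# assumption about the intent of dnnlowp_utils.add_quantization_param_args,
# not its implementation): when the weight is pre-quantized or prepacked the
# op cannot derive output quantization parameters on the fly, so the tests
# derive them from the fp32 reference output collected on the first iteration.
def _output_qparams_from_reference(y_ref):
    import numpy as np  # local import so this sketch stands alone

    y_min = 0.0 if y_ref.size == 0 else float(min(0.0, y_ref.min()))
    y_max = 0.0 if y_ref.size == 0 else float(max(0.0, y_ref.max()))
    scale = max((y_max - y_min) / 255.0, 1e-6)
    zero_point = int(round(-y_min / scale))
    return scale, zero_point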
|
pytorch-master
|
caffe2/quantization/server/conv_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
def mse(x, xh):
    # Despite the name, this is the L2 norm of the elementwise error divided by
    # the number of elements, not the mean of squared errors; it is only used
    # below to compare two error magnitudes against each other.
    d = (x - xh).reshape(-1)
    return 0 if len(d) == 0 else np.sqrt(np.matmul(d, d.transpose())) / len(d)
class FullyConnectedFP16Test(hu.HypothesisTestCase):
@given(
input_channels=st.integers(128, 256),
output_channels=st.integers(128, 256),
batch_size=st.integers(128, 256),
empty_batch=st.booleans(),
**hu.gcs_cpu_only
)
def test_fully_connected(self, input_channels, output_channels, batch_size, empty_batch, gc, dc):
if empty_batch:
batch_size = 0
W = np.random.randn(output_channels, input_channels).astype(np.float32)
X = np.random.randn(batch_size, input_channels).astype(np.float32)
b = np.random.randn(output_channels).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "engine", "order"])
order = "NHWC"
net = core.Net("test_net")
engine = "FAKE_FP16"
fc = core.CreateOperator(
"FC", ["X", "W", "b"], ["Y"], order=order, engine=engine, device_option=gc
)
net.Proto().op.extend([fc])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.create_blob("W").feed(W, device_option=gc)
self.ws.create_blob("b").feed(b, device_option=gc)
self.ws.run(net)
output = Output(Y=self.ws.blobs["Y"].fetch(), engine=engine, order=order)
# Mimic the quantization in python
Wh = W.astype(np.float16)
Xh = X.astype(np.float16)
bh = b.astype(np.float16)
bbh = np.outer(np.ones(batch_size, dtype=np.float16), bh)
assert bbh.dtype == np.float16
Yrefh = np.matmul(Xh, Wh.transpose()) + bbh
assert Yrefh.dtype == np.float16
bb = np.outer(np.ones(batch_size, dtype=np.float32), b)
Yref = np.matmul(X, W.transpose()) + bb
assert Yref.dtype == np.float32
# The error between plain->quantized, and plain->python_quantized
# should be very close
mse_c2 = mse(Yref, output.Y)
mse_py = mse(Yref, Yrefh)
print(np.abs(mse_c2 - mse_py))
assert np.isclose(mse_c2, mse_py, atol=1e-3), np.abs(mse_c2 - mse_py)
|
pytorch-master
|
caffe2/quantization/server/fully_connected_fp16_test.py
|
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPResizeNearest3DOpTest(hu.HypothesisTestCase):
@given(
N=st.integers(1, 3),
T=st.integers(1, 16),
H=st.integers(10, 300),
W=st.integers(10, 300),
C=st.integers(1, 32),
scale_t=st.floats(0.25, 4.0) | st.just(2.0),
scale_w=st.floats(0.25, 4.0) | st.just(2.0),
scale_h=st.floats(0.25, 4.0) | st.just(2.0),
**hu.gcs_cpu_only
)
@settings(max_examples=5, deadline=None)
def test_resize_nearest(self, N, T, H, W, C, scale_t, scale_w, scale_h, gc, dc):
X = np.round(np.random.rand(N, T, H, W, C) * 255).astype(np.float32)
quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
resize_nearest = core.CreateOperator(
"Int8ResizeNearest3D",
["X_q"],
["Y_q"],
temporal_scale=scale_t,
width_scale=scale_w,
height_scale=scale_h,
engine="DNNLOWP",
)
net = core.Net("test_net")
net.Proto().op.extend([quantize, resize_nearest])
workspace.FeedBlob("X", X)
workspace.RunNetOnce(net)
X_q = workspace.FetchInt8Blob("X_q").data
Y_q = workspace.FetchInt8Blob("Y_q").data
def resize_nearest_ref(X):
outT = np.int32(T * scale_t)
outH = np.int32(H * scale_h)
outW = np.int32(W * scale_w)
outT_idxs, outH_idxs, outW_idxs = np.meshgrid(
np.arange(outT), np.arange(outH), np.arange(outW), indexing="ij"
)
inT_idxs = np.minimum(outT_idxs / scale_t, T - 1).astype(np.int32)
inH_idxs = np.minimum(outH_idxs / scale_h, H - 1).astype(np.int32)
inW_idxs = np.minimum(outW_idxs / scale_w, W - 1).astype(np.int32)
Y = X[:, inT_idxs, inH_idxs, inW_idxs, :]
return Y
Y_q_ref = resize_nearest_ref(X_q)
np.testing.assert_allclose(Y_q, Y_q_ref)
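

# Editor's worked example of the index math in resize_nearest_ref above: each
# output index is divided by the scale, clamped to the last input index, and
# truncated.  With 4 input rows and a scale of 2.0 the 8 output rows map back
# to input rows [0, 0, 1, 1, 2, 2, 3, 3].
def _nearest_index_worked_example():
    import numpy as np  # local import so this sketch stands alone

    out_idx = np.arange(8)
    in_idx = np.minimum(out_idx / 2.0, 4 - 1).astype(np.int32)
    return in_idx.tolist()  # [0, 0, 1, 1, 2, 2, 3, 3]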
|
pytorch-master
|
caffe2/quantization/server/resize_nearest_3d_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from caffe2.python import core, dyndep, workspace
from caffe2.python.fb import hardcode_scale_zp # type: ignore[import]
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close,
generate_conv_inputs,
run_conv_or_fc,
)
from hypothesis import assume, given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class GroupWiseDNNLowPOpConvTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
stride=st.integers(1, 2),
pad=st.integers(0, 2),
kernel=st.integers(1, 5),
dilation=st.integers(1, 2),
size=st.integers(10, 16),
group=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 3, 4, 5, 8, 16, 32]),
output_channels_per_group=st.integers(2, 16),
batch_size=st.integers(0, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
prepack_weight=st.booleans(),
preserve_activation_sparsity=st.booleans(),
preserve_weight_sparsity=st.booleans(),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_groupwise_dnnlowp_conv_int(
self,
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
prepack_weight,
preserve_activation_sparsity,
preserve_weight_sparsity,
gc,
dc,
):
assume(group == 1 or dilation == 1)
assume((not prepack_weight) or order == "NHWC")
X, W, b = generate_conv_inputs(
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization=True,
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op_engine_list = [
("Conv", ""),
("Conv", "DNNLOWP"),
("Conv", "DNNLOWP_16"),
("Int8Conv", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
init_net = core.Net("test_init_net")
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine
do_dequantize = "DNNLOWP" in engine
do_prepack_weight = engine == "DNNLOWP" and prepack_weight
if do_quantize:
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
preserve_activation_sparsity=preserve_activation_sparsity,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([quantize])
if do_prepack_weight:
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = hardcode_scale_zp.choose_quantization_params(X_min, X_max)
inputs = ["W"]
if do_dequantize:
inputs += ["b"]
pack = core.CreateOperator(
"Int8ConvPackWeight",
inputs,
["W_packed"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
quantize_groupwise=1,
in_scale=x_q_param.scale,
)
init_net.Proto().op.extend([pack])
conv = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"W_packed" if do_prepack_weight else "W",
"b",
],
["Y_q" if do_dequantize else "Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
quantize_groupwise=1,
device_option=gc,
)
if do_dequantize or do_prepack_weight:
# groupwise quantization only works with static quantization
# so we need to set quantization parameters
dnnlowp_utils.add_quantization_param_args(
conv, outputs[0][0], preserve_activation_sparsity
)
net.Proto().op.extend([conv])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize",
["Y_q"],
["Y"],
preserve_activation_sparsity=preserve_activation_sparsity,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, init_net, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs, symmetric=preserve_activation_sparsity)
# correctness test with no quantization error in inputs
@given(
stride=st.integers(1, 2),
pad=st.integers(0, 2),
kernel=st.integers(1, 5),
dilation=st.integers(1, 2),
size=st.integers(10, 16),
group=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 3, 4, 5, 8, 16, 32]),
output_channels_per_group=st.integers(2, 16),
batch_size=st.integers(0, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_groupwise_dnnlowp_conv_relu_int(
self,
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
gc,
dc,
):
assume(group == 1 or dilation == 1)
X, W, b = generate_conv_inputs(
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
True, # group-wise
)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op_engine_list = [
("Conv", ""),
("ConvRelu", "DNNLOWP"),
("ConvRelu", "DNNLOWP_16"),
("Int8ConvRelu", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if "DNNLOWP" in engine:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
conv = core.CreateOperator(
op_type,
["X_q", "W", "b"],
["Y_q"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
group=group,
quantize_groupwise=1,
device_option=gc,
)
# groupwise quantization only works with static quantization
# so we need to set quantization parameters
dnnlowp_utils.add_quantization_param_args(conv, outputs[0][0])
net.Proto().op.extend([conv])
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
else:
conv = core.CreateOperator(
op_type,
["X", "W", "b"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
group=group,
device_option=gc,
)
net.Proto().op.extend([conv])
relu = core.CreateOperator(
"Relu", ["Y"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([relu])
run_conv_or_fc(
self, None, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs)
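

# Editor's sketch of what quantize_groupwise=1 implies for these tests (an
# assumption about the intent, not the operator's implementation): each of the
# `group` output-channel blocks of W gets its own weight scale, so the output
# requantization parameters cannot be recomputed per batch and must be fixed up
# front, which is why add_quantization_param_args is always called above.
def _per_group_weight_scales_sketch(w, group):
    import numpy as np  # local import so this sketch stands alone

    # w: (output_channels, ...) float weights split into `group` equal blocks;
    # a simple symmetric per-group scale is shown here.
    blocks = np.split(np.asarray(w), group, axis=0)
    return [max(float(np.abs(b).max()) / 127.0, 1e-6) for b in blocks]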
|
pytorch-master
|
caffe2/quantization/server/conv_groupwise_dnnlowp_op_test.py
|
pytorch-master
|
caffe2/quantization/server/__init__.py
|
|
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPResizeNearestOpTest(hu.HypothesisTestCase):
@given(
N=st.integers(0, 3),
H=st.integers(10, 300),
W=st.integers(10, 300),
C=st.integers(1, 32),
scale_w=st.floats(0.25, 4.0) | st.just(2.0),
scale_h=st.floats(0.25, 4.0) | st.just(2.0),
**hu.gcs_cpu_only
)
@settings(deadline=None, max_examples=50)
def test_resize_nearest(self, N, H, W, C, scale_w, scale_h, gc, dc):
X = np.round(np.random.rand(N, H, W, C) * 255).astype(np.float32)
quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
resize_nearest = core.CreateOperator(
"Int8ResizeNearest",
["X_q"],
["Y_q"],
width_scale=scale_w,
height_scale=scale_h,
engine="DNNLOWP",
)
net = core.Net("test_net")
net.Proto().op.extend([quantize, resize_nearest])
workspace.FeedBlob("X", X)
workspace.RunNetOnce(net)
X_q = workspace.FetchInt8Blob("X_q").data
Y_q = workspace.FetchInt8Blob("Y_q").data
def resize_nearest_ref(X):
outH = np.int32(H * scale_h)
outW = np.int32(W * scale_w)
outH_idxs, outW_idxs = np.meshgrid(
np.arange(outH), np.arange(outW), indexing="ij"
)
inH_idxs = np.minimum(outH_idxs / scale_h, H - 1).astype(np.int32)
inW_idxs = np.minimum(outW_idxs / scale_w, W - 1).astype(np.int32)
Y = X[:, inH_idxs, inW_idxs, :]
return Y
Y_q_ref = resize_nearest_ref(X_q)
np.testing.assert_allclose(Y_q, Y_q_ref)
|
pytorch-master
|
caffe2/quantization/server/resize_nearest_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPGatherOpTest(hu.HypothesisTestCase):
@given(
dim1=st.integers(256, 512),
dim2=st.integers(32, 256),
is_empty=st.booleans(),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_gather(self, dim1, dim2, is_empty, in_quantized, out_quantized, gc, dc):
if is_empty:
dim2 = 0
        # FIXME: DNNLOWP Gather doesn't support quantized input together with
        # dequantized output, so force a quantized output in that case
if in_quantized:
out_quantized = True
data = (np.random.rand(dim1) * 2 - 1).astype(np.float32)
index = np.floor(np.random.rand(dim2) * dim1).astype(np.int32)
Output = collections.namedtuple("Output", ["out", "op_type", "engine"])
outputs = []
op_engine_list = [
("Gather", ""),
("Gather", "DNNLOWP"),
("Int8Gather", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
if do_quantize:
quantize_data = core.CreateOperator(
"Quantize", ["data"], ["data_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_data])
gather = core.CreateOperator(
op_type,
["data_q" if do_quantize else "data", "index"],
["out_q" if do_dequantize else "out"],
dequantize_output=not do_dequantize,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([gather])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["out_q"], ["out"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("data").feed(data, device_option=gc)
self.ws.create_blob("index").feed(index, device_option=gc)
self.ws.run(net)
outputs.append(
Output(out=self.ws.blobs["out"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs, ref=data)
|
pytorch-master
|
caffe2/quantization/server/gather_dnnlowp_op_test.py
|
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, utils, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPChannelShuffleOpsTest(hu.HypothesisTestCase):
@given(
channels_per_group=st.integers(min_value=1, max_value=5),
groups=st.sampled_from([1, 4, 8, 9]),
n=st.integers(0, 2),
order=st.sampled_from(["NCHW", "NHWC"]),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_channel_shuffle(self, channels_per_group, groups, n, order, gc, dc):
X = np.round(np.random.rand(n, channels_per_group * groups, 5, 6) * 255).astype(
np.float32
)
if n != 0:
X[0, 0, 0, 0] = 0
X[0, 0, 0, 1] = 255
if order == "NHWC":
X = utils.NCHW2NHWC(X)
net = core.Net("test_net")
quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
channel_shuffle = core.CreateOperator(
"ChannelShuffle",
["X_q"],
["Y_q"],
group=groups,
kernel=1,
order=order,
engine="DNNLOWP",
)
dequantize = core.CreateOperator("Dequantize", ["Y_q"], ["Y"], engine="DNNLOWP")
net.Proto().op.extend([quantize, channel_shuffle, dequantize])
workspace.FeedBlob("X", X)
workspace.RunNetOnce(net)
Y = workspace.FetchBlob("Y")
def channel_shuffle_ref(X):
if order == "NHWC":
X = utils.NHWC2NCHW(X)
Y_r = X.reshape(
X.shape[0], groups, X.shape[1] // groups, X.shape[2], X.shape[3]
)
Y_trns = Y_r.transpose((0, 2, 1, 3, 4))
Y_reshaped = Y_trns.reshape(X.shape)
if order == "NHWC":
Y_reshaped = utils.NCHW2NHWC(Y_reshaped)
return Y_reshaped
Y_ref = channel_shuffle_ref(X)
np.testing.assert_allclose(Y, Y_ref)
@given(
channels_per_group=st.integers(min_value=32, max_value=128),
n=st.integers(0, 2),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_channel_shuffle_fast_path(self, channels_per_group, n, gc, dc):
order = "NHWC"
groups = 4
X = np.round(np.random.rand(n, channels_per_group * groups, 5, 6) * 255).astype(
np.float32
)
if n != 0:
X[0, 0, 0, 0] = 0
X[0, 0, 0, 1] = 255
X = utils.NCHW2NHWC(X)
net = core.Net("test_net")
quantize = core.CreateOperator("Quantize", ["X"], ["X_q"], engine="DNNLOWP")
channel_shuffle = core.CreateOperator(
"ChannelShuffle",
["X_q"],
["Y_q"],
group=groups,
kernel=1,
order=order,
engine="DNNLOWP",
)
dequantize = core.CreateOperator("Dequantize", ["Y_q"], ["Y"], engine="DNNLOWP")
net.Proto().op.extend([quantize, channel_shuffle, dequantize])
workspace.FeedBlob("X", X)
workspace.RunNetOnce(net)
Y = workspace.FetchBlob("Y")
def channel_shuffle_ref(X):
if order == "NHWC":
X = utils.NHWC2NCHW(X)
Y_r = X.reshape(
X.shape[0], groups, X.shape[1] // groups, X.shape[2], X.shape[3]
)
Y_trns = Y_r.transpose((0, 2, 1, 3, 4))
Y_reshaped = Y_trns.reshape(X.shape)
if order == "NHWC":
Y_reshaped = utils.NCHW2NHWC(Y_reshaped)
return Y_reshaped
Y_ref = channel_shuffle_ref(X)
np.testing.assert_allclose(Y, Y_ref)
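

# Editor's worked example of the order computed by channel_shuffle_ref above:
# with 6 channels in 3 groups of 2, channel order [0, 1, 2, 3, 4, 5] becomes
# [0, 2, 4, 1, 3, 5] -- the groups are interleaved by the transpose.
def _channel_shuffle_order_example():
    import numpy as np  # local import so this sketch stands alone

    c = np.arange(6).reshape(1, 3, 2, 1, 1)
    shuffled = c.transpose((0, 2, 1, 3, 4)).reshape(1, 6, 1, 1)
    return shuffled.reshape(-1).tolist()  # [0, 2, 4, 1, 3, 5]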
|
pytorch-master
|
caffe2/quantization/server/channel_shuffle_dnnlowp_op_test.py
|
import collections
from itertools import product
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
avoid_vpmaddubsw_overflow_fc,
check_quantized_results_close,
)
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPBatchMatMulOpTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
m=st.integers(0, 32),
n=st.integers(4, 32),
k=st.integers(4, 32),
batch_size=st.integers(0, 4),
**hu.gcs_cpu_only
)
@settings(deadline=10000)
def test_dnnlowp_batch_matmul_int(self, m, n, k, batch_size, gc, dc):
# A and B have scale 1, so exactly represented after quantization
A_min = -77
A_max = A_min + 255
A = np.round(np.random.rand(batch_size, m, k) * 255 + A_min)
A = A.astype(np.float32)
# input channels 0 and 1 are all A_min to avoid overflow from vpmaddubsw
# when multiplied with B_min and B_max
if batch_size > 0 and m > 0:
A[0, :, 0] = A_min
A[0, 0, 1] = A_max
B_min = -100
B_max = B_min + 255
B = np.round(np.random.rand(batch_size, n, k) * 255 + B_min)
B = B.astype(np.float32)
if batch_size > 0:
B[0, 0, 0] = B_min
B[0, 1, 0] = B_max
for i in range(batch_size):
avoid_vpmaddubsw_overflow_fc(
m, k, n, A[i,], A_min, A_max, B[i,], B_min, B_max
)
for trans_a, trans_b in product([0, 1], [0, 1]):
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("BatchMatMul", ""),
("BatchMatMul", "DNNLOWP"),
("BatchMatMul", "DNNLOWP_16"),
("Int8BatchMatMul", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if "DNNLOWP" in engine:
quantize_A = core.CreateOperator(
"Quantize", ["A"], ["A_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_A])
quantize_B = core.CreateOperator(
"Quantize", ["B"], ["B_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_B])
batch_matmul = core.CreateOperator(
op_type,
[
"A_q" if "DNNLOWP" in engine else "A",
"B_q" if "DNNLOWP" in engine else "B",
],
["Y_q" if "DNNLOWP" in engine else "Y"],
trans_a=trans_a,
trans_b=trans_b,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([batch_matmul])
if "DNNLOWP" in engine:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("A").feed(
np.transpose(A, (0, 2, 1)) if trans_a else A, device_option=gc
)
self.ws.create_blob("B").feed(
B if trans_b else np.transpose(B, (0, 2, 1)), device_option=gc
)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
# correctness test with no quantization error in inputs
@given(
m=st.integers(0, 32),
n=st.integers(4, 32),
k=st.integers(4, 32),
C_1=st.integers(0, 3), # number of batch dims
C_2=st.integers(0, 3),
A_quantized=st.booleans(),
B_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
@settings(deadline=2000)
def test_dnnlowp_batch_matmul_int_constant_B(
self, m, n, k, C_1, C_2, A_quantized, B_quantized, out_quantized, gc, dc
):
batch_dims = tuple(np.random.randint(3, size=max(C_1, C_2)))
batch_dims_A = batch_dims[-C_1:]
batch_dims_B = batch_dims[-C_2:]
A = np.zeros(batch_dims_A + (m, k)).astype(np.float32)
B = np.zeros(batch_dims_B + (n, k)).astype(np.float32)
if np.prod(batch_dims) > 0:
for index in np.ndindex(batch_dims_A):
# When both input and output are float, each input of the batch has
# scale 1 but with different offset, so input-wise quantization
# shouldn't have any input quantization error
# A_min = -77 if (A_quantized or out_quantized) else -77 + i
A_min = -77
A_max = A_min + 255
A[index] = np.round(np.random.rand(m, k) * 255 + A_min)
# input channels 0 and 1 are all A_min to avoid overflow from vpmaddubsw
# when multiplied with B_min and B_max
A[index][:, 0] = A_min
if m != 0:
A[index][0, 1] = A_max
i = 0
for index in np.ndindex(batch_dims_B):
# When weight is quantized in a lazy manner, each input of the batch has
# scale 1 but with different offset, so input-wise quantization
# shouldn't have any input quantization error when weight is quantized
# in a lazy manner.
B_min = -100 if B_quantized else -100 + i
# B_min = -100
B_max = B_min + 255
B[index] = np.round(np.random.rand(n, k) * 255 + B_min)
B[index][0, 0] = B_min
B[index][1, 0] = B_max
if C_1 > C_2:
# A has more dims
for outer_index in np.ndindex(batch_dims_A[: C_1 - C_2]):
avoid_vpmaddubsw_overflow_fc(
m,
k,
n,
A[outer_index] if C_2 == 0 else A[outer_index + index],
A_min,
A_max,
B[index],
B_min,
B_max,
)
else:
avoid_vpmaddubsw_overflow_fc(
m, k, n, A[index[-C_1:]], A_min, A_max, B[index], B_min, B_max
)
i += 1
for trans_a, trans_b in product([0, 1], [0, 1]):
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("BatchMatMul", ""),
("BatchMatMul", "DNNLOWP"),
("Int8BatchMatMul", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize_A = "DNNLOWP" in engine and A_quantized
do_quantize_B = "DNNLOWP" in engine and B_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
if do_quantize_A:
quantize_A = core.CreateOperator(
"Quantize", ["A"], ["A_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_A])
if do_quantize_B:
int8_given_tensor_fill, B_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
B if trans_b else B.swapaxes(-1, -2), "B_q"
)
net.Proto().op.extend([int8_given_tensor_fill])
batch_matmul = core.CreateOperator(
op_type,
["A_q" if do_quantize_A else "A", "B_q" if do_quantize_B else "B"],
["Y_q" if do_dequantize else "Y"],
trans_a=trans_a,
trans_b=trans_b,
broadcast=True,
constant_B=True,
dequantize_output=not do_dequantize,
engine=engine,
device_option=gc,
)
if do_quantize_B:
# When quantized weight is provided, we can't rescale the
# output dynamically by looking at the range of output of each
# batch, so here we provide the range of output observed from
# fp32 reference implementation
dnnlowp_utils.add_quantization_param_args(
batch_matmul, outputs[0][0]
)
net.Proto().op.extend([batch_matmul])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("A").feed(
A.swapaxes(-1, -2) if trans_a else A, device_option=gc
)
self.ws.create_blob("B").feed(
B if trans_b else B.swapaxes(-1, -2), device_option=gc
)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
if np.prod(batch_dims) > 0:
check_quantized_results_close(outputs)
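

# Editor's note on the trans_a / trans_b feeding above (a sketch of the
# intended math, not the operator's implementation): A is generated as
# (batch, m, k) and B as (batch, n, k), and the branches feed transposed
# copies so that every flag combination computes the same product
# Y[i] = A[i] @ B[i].T.
def _batch_matmul_ref_sketch(a, b):
    import numpy as np  # local import so this sketch stands alone

    # a: (batch..., m, k), b: (batch..., n, k) -> (batch..., m, n)
    return np.matmul(a, np.swapaxes(b, -1, -2))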
|
pytorch-master
|
caffe2/quantization/server/batch_matmul_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpSumOpTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
N=st.integers(32, 256),
M=st.integers(1, 3),
is_empty=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_elementwise_sum_int(self, N, M, is_empty, gc, dc):
if is_empty:
N = 0
# All inputs have scale 1, so exactly represented after quantization
inputs = M * [None]
X_names = M * [None]
X_q_names = M * [None]
for i in range(M):
X = np.random.randint(-128, 127, N, np.int8).astype(np.float32)
if N != 0:
X[0] = -128
X[-1] = 127
inputs[i] = X
X_names[i] = chr(ord("A") + i)
X_q_names[i] = X_names[i] + "_q"
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [("Sum", ""), ("Sum", "DNNLOWP"), ("Int8Sum", "DNNLOWP")]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if engine == "DNNLOWP":
for i in range(M):
quantize = core.CreateOperator(
"Quantize",
X_names[i],
X_q_names[i],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([quantize])
sum_ = core.CreateOperator(
op_type,
X_q_names if engine == "DNNLOWP" else X_names,
["Y_q" if engine == "DNNLOWP" else "Y"],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([sum_])
if engine == "DNNLOWP":
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
for i in range(M):
                self.ws.create_blob(X_names[i]).feed(inputs[i], device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
# correctness test with no quantization error in inputs
@given(N=st.integers(32, 256), M=st.integers(1, 3), **hu.gcs_cpu_only)
def test_dnnlowp_elementwise_sum_int_inplace(self, N, M, gc, dc):
# All inputs have scale 1, so exactly represented after quantization
inputs = M * [None]
X_names = M * [None]
X_q_names = M * [None]
for i in range(M):
X = np.random.randint(-128, 127, N, np.int8).astype(np.float32)
X[0] = -128
X[-1] = 127
inputs[i] = X
X_names[i] = chr(ord("A") + i)
X_q_names[i] = X_names[i] + "_q"
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [("Sum", ""), ("Sum", "DNNLOWP"), ("Int8Sum", "DNNLOWP")]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if engine == "DNNLOWP":
for i in range(M):
quantize = core.CreateOperator(
"Quantize",
X_names[i],
X_q_names[i],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([quantize])
sum_ = core.CreateOperator(
op_type,
X_q_names if engine == "DNNLOWP" else X_names,
[X_q_names[0] if engine == "DNNLOWP" else X_names[0]],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([sum_])
if engine == "DNNLOWP":
dequantize = core.CreateOperator(
"Dequantize",
[X_q_names[0]],
[X_names[0]],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([dequantize])
for i in range(M):
                self.ws.create_blob(X_names[i]).feed(inputs[i], device_option=gc)
self.ws.run(net)
outputs.append(
Output(
Y=self.ws.blobs[X_names[0]].fetch(), op_type=op_type, engine=engine
)
)
check_quantized_results_close(outputs)
# correctness test with no quantization error in inputs
@given(N=st.integers(32, 256), M=st.integers(1, 3), **hu.gcs_cpu_only)
def test_dnnlowp_elementwise_sum_relu_int(self, N, M, gc, dc):
# All inputs have scale 1, so exactly represented after quantization
inputs = M * [None]
X_names = M * [None]
X_q_names = M * [None]
for i in range(M):
X = np.random.randint(-128, 127, N, np.int8).astype(np.float32)
X[0] = -128
X[-1] = 127
inputs[i] = X
X_names[i] = chr(ord("A") + i)
X_q_names[i] = X_names[i] + "_q"
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("SumRelu", ""),
("SumRelu", "DNNLOWP"),
("Int8SumRelu", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if engine == "DNNLOWP":
for i in range(M):
quantize = core.CreateOperator(
"Quantize",
X_names[i],
X_q_names[i],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([quantize])
sum_relu = core.CreateOperator(
op_type,
X_q_names if engine == "DNNLOWP" else X_names,
["Y_q" if engine == "DNNLOWP" else "Y"],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([sum_relu])
if engine == "DNNLOWP":
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
for i in range(M):
                self.ws.create_blob(X_names[i]).feed(inputs[i], device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
# correctness test with no quantization error in inputs
@given(N=st.integers(32, 256), M=st.integers(1, 3), **hu.gcs_cpu_only)
def test_dnnlowp_elementwise_sum_relu_int_inplace(self, N, M, gc, dc):
# All inputs have scale 1, so exactly represented after quantization
inputs = M * [None]
X_names = M * [None]
X_q_names = M * [None]
for i in range(M):
X = np.random.randint(-128, 127, N, np.int8).astype(np.float32)
X[0] = -128
X[-1] = 127
inputs[i] = X
X_names[i] = chr(ord("A") + i)
X_q_names[i] = X_names[i] + "_q"
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("SumRelu", ""),
("SumRelu", "DNNLOWP"),
("Int8SumRelu", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if engine == "DNNLOWP":
for i in range(M):
quantize = core.CreateOperator(
"Quantize",
X_names[i],
X_q_names[i],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([quantize])
sum_relu = core.CreateOperator(
op_type,
X_q_names if engine == "DNNLOWP" else X_names,
[X_q_names[0] if engine == "DNNLOWP" else X_names[0]],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([sum_relu])
if engine == "DNNLOWP":
dequantize = core.CreateOperator(
"Dequantize",
[X_q_names[0]],
[X_names[0]],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([dequantize])
for i in range(M):
                self.ws.create_blob(X_names[i]).feed(inputs[i], device_option=gc)
self.ws.run(net)
outputs.append(
Output(
Y=self.ws.blobs[X_names[0]].fetch(), op_type=op_type, engine=engine
)
)
check_quantized_results_close(outputs)
|
pytorch-master
|
caffe2/quantization/server/elementwise_sum_dnnlowp_op_test.py
|
import collections
import numpy as np
from caffe2.python import utils, workspace
from caffe2.quantization.server import dnnlowp_pybind11
from hypothesis import assume
# This function asserts that the quantized results (outputs[1:]) are close
# enough to the floating-point reference results (outputs[0]).
# The error bound is derived under the assumption that there is no input
# quantization error.
def check_quantized_results_close(outputs, ref=None, symmetric=False, atol_scale=0.53):
if ref is None:
ref = outputs[0][0]
if ref.size == 0:
return
ref_min = min(np.min(ref), 0)
ref_max = max(np.max(ref), 0)
if symmetric:
ref_scale = 2 * max(abs(ref_max), abs(ref_min)) / 255
else:
ref_scale = (ref_max - ref_min) / 255
    # In exact math the quantization step would be divided by 2, but the
    # default atol_scale of 0.53 (~ 1 / 1.9) leaves extra slack for finite
    # floating-point precision.
atol = ref_scale * atol_scale
for o in outputs[1:]:
np.testing.assert_allclose(o[0], outputs[0][0], atol=atol, rtol=0)
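# Illustrative sketch (hypothetical helper and numbers, not used by any test):
# for a reference output spanning [-100, 155], the asymmetric 8-bit step is
# (155 - (-100)) / 255 = 1.0, so the default atol_scale of 0.53 lets every
# element deviate by roughly half a quantization step.
def _example_atol_for_range(ref_min=-100.0, ref_max=155.0, atol_scale=0.53):
    ref_scale = (max(ref_max, 0) - min(ref_min, 0)) / 255
    return ref_scale * atol_scale  # 1.0 * 0.53 = 0.53 for the defaults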
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
from itertools import tee
a, b = tee(iterable)
next(b, None)
return zip(a, b)
# Make sure we won't have overflows from the vpmaddubsw instruction used in fbgemm.
def avoid_vpmaddubsw_overflow_fc(
batch_size, input_channels, output_channels, X, X_min, X_max, W, W_min, W_max
):
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
if x0 * w0 + x1 * w1 < -(1 << 15):
w1_adjusted = (-(1 << 15) - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
elif x0 * w0 + x1 * w1 > (1 << 15) - 1:
w1_adjusted = ((1 << 15) - 1 - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
# Go through the same loop again to double check we don't have any overflow
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
assert -(1 << 15) <= x0 * w0 + x1 * w1 < (1 << 15)
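# Illustrative sketch (hypothetical helper, mirrors the assertion above):
# vpmaddubsw multiplies two adjacent uint8 activations by two adjacent int8
# weights and adds the pair of products into a signed 16-bit lane, so the
# worst case 255 * 127 + 255 * 127 = 64770 exceeds the int16 maximum of 32767
# and would saturate.
def _vpmaddubsw_pair_would_overflow(x0, x1, w0, w1):
    acc = int(x0) * int(w0) + int(x1) * int(w1)
    return not (-(1 << 15) <= acc < (1 << 15))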
# Make sure we won't have overflows from the vpmaddubsw instruction used in
# fbgemm (FIXME: this assumes fbgemm is used only for NHWC and im2col
# is done in such a way that input_channels is the fastest-moving
# dimension).
#
# strides, pads, kernels, dilations, and sizes should be tuples with the same dimension
# (2 for 2D conv, 3 for 3D conv, and so on)
def avoid_vpmaddubsw_overflow(
strides,
pads,
kernels,
dilations,
sizes,
input_channels,
output_channels,
batch_size,
X,
X_min,
X_max,
W,
W_min,
W_max,
):
ndim = len(sizes)
dkernels = tuple((dilations[i] * (kernels[i] - 1) + 1) for i in range(ndim))
size_cols = tuple(
(sizes[i] + 2 * pads[i] - dkernels[i]) // strides[i] + 1 for i in range(ndim)
)
for out_idx in np.ndindex((batch_size,) + size_cols + (output_channels,)):
b = out_idx[0]
oc = out_idx[-1]
o_spatial = out_idx[1:-1]
for filter_idx1, filter_idx2 in pairwise(
np.ndindex(kernels + (input_channels,))
):
f0 = filter_idx1[:-1]
ic0 = filter_idx1[-1]
f1 = filter_idx2[:-1]
ic1 = filter_idx2[-1]
i0s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f0[i]
for i in range(ndim)
)
i1s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f1[i]
for i in range(ndim)
)
w0 = W[(oc,) + f0 + (ic0,)] - 128 - W_min
w1 = W[(oc,) + f1 + (ic1,)] - 128 - W_min
if all(0 <= i0s[i] < sizes[i] for i in range(ndim)):
x0 = X[(b,) + i0s + (ic0,)] - X_min
else:
# padding
x0 = -X_min
if all(0 <= i1s[i] < sizes[i] for i in range(ndim)):
x1 = X[(b,) + i1s + (ic1,)] - X_min
else:
# padding
x1 = -X_min
if x0 * w0 + x1 * w1 < -(1 << 15):
w1_adjusted = (-(1 << 15) - float(x0) * w0) / x1
W[(oc,) + f1 + (ic1,)] = int(w1_adjusted) + 128 + W_min
elif x0 * w0 + x1 * w1 >= (1 << 15):
w1_adjusted = ((1 << 15) - 1 - float(x0) * w0) / x1
W[(oc,) + f1 + (ic1,)] = int(w1_adjusted) + 128 + W_min
# Go through the same loop again to double check we don't have any overflow
for out_idx in np.ndindex((batch_size,) + size_cols + (output_channels,)):
b = out_idx[0]
oc = out_idx[-1]
o_spatial = out_idx[1:-1]
for filter_idx1, filter_idx2 in pairwise(
np.ndindex(kernels + (input_channels,))
):
f0 = filter_idx1[:-1]
ic0 = filter_idx1[-1]
f1 = filter_idx2[:-1]
ic1 = filter_idx2[-1]
i0s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f0[i]
for i in range(ndim)
)
i1s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f1[i]
for i in range(ndim)
)
w0 = W[(oc,) + f0 + (ic0,)] - 128 - W_min
w1 = W[(oc,) + f1 + (ic1,)] - 128 - W_min
if all(0 <= i0s[i] < sizes[i] for i in range(ndim)):
x0 = X[(b,) + i0s + (ic0,)] - X_min
else:
# padding
x0 = -X_min
if all(0 <= i1s[i] < sizes[i] for i in range(ndim)):
x1 = X[(b,) + i1s + (ic1,)] - X_min
else:
# padding
x1 = -X_min
assert -(1 << 15) <= x0 * w0 + x1 * w1 < (1 << 15)
# strides, pads, kernels, dilations, and sizes should be tuples with the same dimension
# (2 for 2D conv, 3 for 3D conv, and so on)
def generate_convnd_inputs(
strides,
pads,
kernels,
dilations,
sizes,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization=False,
preserve_activation_sparsity=False,
preserve_weight_sparsity=False,
):
dim = len(sizes)
assume(all(len(a) == dim for a in [strides, pads, kernels, dilations]))
assume(all(sizes[d] >= dilations[d] * (kernels[d] - 1) + 1 for d in range(dim)))
input_channels = input_channels_per_group * group
output_channels = output_channels_per_group * group
depthwise_convolution = (
input_channels_per_group == 1 and output_channels_per_group == 1
)
assert input_channels > 1
assert output_channels > 1
# X and W have scale 1, so exactly represented after quantization
X_min = 0 if preserve_activation_sparsity else -77
X_max = X_min + 255
X_range = X_max - X_min
if depthwise_convolution and groupwise_quantization:
# For depthwise convolution, it's not enough to set input channel 0
# to all X_min to avoid overflow from vpmaddubsw
X_range /= 2
X = np.round(
np.random.rand(*((batch_size,) + tuple(sizes) + (input_channels,))) * X_range
+ X_min
)
X = X.astype(np.float32)
if (
batch_size != 0
and depthwise_convolution
and groupwise_quantization
and not preserve_activation_sparsity
):
# Put X_max in a position not to be paired with any padded value.
# Put X_min to all positions that can be paired with the X_max value.
#
# This is an example of a pattern for 3x3x3
# . . . . .
# . . . . .
# . . . . .
# . . . . .
# . . . . min
#
# . . . . .
# . . . . min
# . min max min .
# min . . . .
# . . . . .
#
# min . . . .
# . . . . .
# . . . . .
# . . . . .
# . . . . .
        # Make sure we have enough dimensions
assert X.shape[1] >= 3
assert all(X.shape[d + 1] >= kernels[d] + 2 for d in range(1, dim))
# Take subtensor we want to manipulate
X_sub = X[(0,) * (X.ndim - dim - 1) + (slice(None),) * dim + (0,)]
# Put X_max in the middle of the subtensor
X_sub[(1,) + tuple(kernels[d] // 2 + 1 for d in range(1, dim))] = X_max
# Put X_min to the positions that can be paired with X_max across
# the slowest moving dimension
X_sub[[[0, 2]] + [[kernels[d] + 1, 0] for d in range(1, dim)]] = X_min
# Put X_min to other positions that can be paired with X_max
for d1 in range(1, dim):
X_sub[
[[1]]
+ [[kernels[d2] // 2 + 1] for d2 in range(1, d1)]
+ [[kernels[d1] // 2, kernels[d1] // 2 + 2]]
+ [[kernels[d2] + 1, 0] for d2 in range(d1 + 1, dim)]
] = X_min
else:
# input channel 0 is all X_min to avoid overflow from vpmaddubsw when
# multiplied with W_min and W_max
X[..., 0] = X_min
if batch_size != 0:
X[(0,) * (X.ndim - 1) + (1,)] = X_max
if preserve_weight_sparsity:
W_min = -128
W_max = 100
else:
W_min = -100
W_max = W_min + 255
W = np.round(
np.random.rand(
*((output_channels,) + tuple(kernels) + (input_channels_per_group,))
)
* (W_max - W_min)
+ W_min
)
W = W.astype(np.float32)
if groupwise_quantization:
for g in range(group):
W[(g * output_channels_per_group,) + (0,) * (W.ndim - 1)] = W_min
if depthwise_convolution:
W[(g * output_channels_per_group, 1) + (0,) * (W.ndim - 2)] = W_max
else:
assert output_channels_per_group > 1
W[(g * output_channels_per_group + 1,) + (0,) * (W.ndim - 1)] = W_max
# Make sure each group has different ranges to really see the effect
# of group-wise quantization.
if not preserve_weight_sparsity:
W[
g * output_channels_per_group : (g + 1) * output_channels_per_group,
] += g
else:
W[(0,) + (0,) * (W.ndim - 1)] = W_min
W[(1,) + (0,) * (W.ndim - 1)] = W_max
different_range_per_group = groupwise_quantization and not preserve_weight_sparsity
for g in range(group):
avoid_vpmaddubsw_overflow(
strides,
pads,
kernels,
dilations,
sizes,
input_channels_per_group,
output_channels_per_group,
batch_size,
X[..., g * input_channels_per_group : (g + 1) * input_channels_per_group],
X_min,
X_max,
W[g * output_channels_per_group : (g + 1) * output_channels_per_group,],
W_min + (g if different_range_per_group else 0),
W_max + (g if different_range_per_group else 0),
)
if order == "NCHW":
X = utils.NHWC2NCHW(X)
W = utils.NHWC2NCHW(W)
b = np.random.randn(output_channels).astype(np.float32)
return X, W, b
def generate_conv_inputs(
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization=False,
preserve_activation_sparsity=False,
preserve_weight_sparsity=False,
):
return generate_convnd_inputs(
(stride,) * 2,
(pad,) * 2,
(kernel,) * 2,
(dilation,) * 2,
(size,) * 2,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization,
preserve_activation_sparsity,
preserve_weight_sparsity,
)
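def _example_depthwise_conv_inputs():
    # Illustrative sketch (assumed parameter values, not called by any test):
    # NHWC inputs for a 3x3, stride-1, 8-group depthwise convolution, similar
    # to what the depthwise conv tests construct. Like those tests, this is
    # meant to be invoked from inside a hypothesis test, because
    # generate_convnd_inputs calls assume().
    return generate_conv_inputs(
        stride=1,
        pad=1,
        kernel=3,
        dilation=1,
        size=10,
        group=8,
        input_channels_per_group=1,
        output_channels_per_group=1,
        batch_size=2,
        order="NHWC",
    )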
def run_conv_or_fc(
test_case,
init_net,
net,
X,
W,
b,
op_type,
engine,
order,
gc,
outputs,
scale=None,
zero_point=None,
x_scale=None,
x_zero_point=None,
):
if order:
# Conv
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
else:
# FC
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
    # We run DNNLOWP ops multiple times to test their first runs, which do
    # caching and therefore exercise different code paths from the
    # subsequent runs.
    # self.ws.run re-creates the operator every time, so this covers cases
    # where multiple nets share the same workspace.
test_case.ws.create_blob("X").feed(X, device_option=gc)
test_case.ws.create_blob("W").feed(W, device_option=gc)
test_case.ws.create_blob("b").feed(b, device_option=gc)
if scale is not None and zero_point is not None:
with workspace.WorkspaceGuard(test_case.ws):
dnnlowp_pybind11.CreateInt8QuantParamsBlob(
"quant_param", float(scale), int(zero_point)
)
if x_scale is not None and x_zero_point is not None:
with workspace.WorkspaceGuard(test_case.ws):
dnnlowp_pybind11.CreateInt8QuantParamsBlob(
"X_quant_param", float(x_scale), int(x_zero_point)
)
if init_net:
test_case.ws.run(init_net)
for i in range(1 if engine == "" else 2):
test_case.ws.run(net)
Y = test_case.ws.blobs["Y"].fetch()
if order:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine, order=order))
else:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine))
# workspace.CreateNet + workspace.RunNet reuses the same operator
if engine != "":
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
if scale is not None and zero_point is not None:
dnnlowp_pybind11.CreateInt8QuantParamsBlob(
"quant_param", float(scale), int(zero_point)
)
if x_scale is not None and x_zero_point is not None:
dnnlowp_pybind11.CreateInt8QuantParamsBlob(
"X_quant_param", float(x_scale), int(x_zero_point)
)
if init_net:
workspace.RunNetOnce(init_net)
workspace.CreateNet(net)
for i in range(2):
workspace.RunNet(net)
Y = workspace.FetchBlob("Y")
if order:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine, order=order))
else:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine))
|
pytorch-master
|
caffe2/quantization/server/dnnlowp_test_utils.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close,
run_conv_or_fc
)
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPFullyConnectedAcc16OpTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
# fbgemm currently only supports N a multiple of 64
@given(
input_channels=st.sampled_from([32, 64]),
output_channels=st.sampled_from([64, 128, 256]),
batch_size=st.sampled_from([0, 32, 64, 128, 256]),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_fully_connected_acc16_int(
self,
input_channels,
output_channels,
batch_size,
in_quantized,
out_quantized,
gc,
dc,
):
        # X and W have scale 1, so they are exactly represented after
        # quantization. This is ensured by having at least one 0 and one 255
        # for unsigned 8-bit tensors, and at least one -128 and one 127 for
        # signed 8-bit tensors.
        # Since fbgemm_acc16 accumulates to 16 bits, we use small numbers in
        # this test to avoid overflow, except for those 0, 255, -128, and 127.
        # We also make sure 255, -128, and 127 are never multiplied together
        # by putting them in different input channels and zeroing the
        # corresponding input channel in the other matrix.
        # For example, we put 255 in input channel 1 of X, so we make the
        # corresponding input channel in W all zeros.
X_min = -77
X_max = X_min + 255
X = np.round(np.random.rand(batch_size, input_channels) * 4 + X_min)
X = X.astype(np.float32)
X[:, 0] = X_min
if batch_size != 0:
X[0, 1] = X_max
W_min = -100
W_max = W_min + 255
W = np.round(
np.random.rand(output_channels, input_channels) * 4 - 2 + W_min + 128
)
W = W.astype(np.float32)
W[0, 0] = W_min
W[1, 0] = W_max
W[:, 1] = W_min + 128
# No input quantization error in bias
b = np.round(np.random.randn(output_channels)).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("FC", ""),
("FC", "DNNLOWP_ACC16"),
("Int8FC", "DNNLOWP_ACC16"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine="DNNLOWP", device_option=gc
)
net.Proto().op.extend([quantize])
fc = core.CreateOperator(
op_type,
["X_q" if do_quantize else "X", "W", "b"],
["Y_q" if do_dequantize else "Y"],
dequantize_output=(0 if do_dequantize else 1),
engine=engine,
device_option=gc,
)
net.Proto().op.extend([fc])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine="DNNLOWP", device_option=gc
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, None, net, X, W, b, op_type, engine, None, gc, outputs
)
check_quantized_results_close(outputs)
@given(
input_channels=st.sampled_from([2]),
output_channels=st.sampled_from([4]),
batch_size=st.sampled_from([0, 1]),
nbits_in_non_outlier=st.sampled_from([0, 6]),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
prepack_weight=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_fully_connected_acc16_outlier(
self,
input_channels,
output_channels,
batch_size,
nbits_in_non_outlier,
in_quantized,
out_quantized,
prepack_weight,
gc,
dc,
):
        # X and W have scale 1, so they are exactly represented after
        # quantization. This is ensured by having at least one 0 and one 255
        # for unsigned 8-bit tensors, and at least one -128 and one 127 for
        # signed 8-bit tensors.
        # Since fbgemm_acc16 accumulates to 16 bits, we use small numbers in
        # this test to avoid overflow, except for those 0, 255, -128, and 127.
        # We also make sure 255, -128, and 127 are never multiplied together
        # by putting them in different input channels and zeroing the
        # corresponding input channel in the other matrix.
        # For example, we put 255 in input channel 1 of X, so we make the
        # corresponding input channel in W all zeros.
X_min = -77
X_max = X_min + 255
X = np.round(np.random.rand(batch_size, input_channels) * 4 + X_min)
X = X.astype(np.float32)
X[:, 0] = X_min
if batch_size != 0:
X[0, 1] = X_max
W_min = -100
W_max = W_min + 255
W = np.round(
np.random.rand(output_channels, input_channels) * 4 - 2 + W_min + 128
)
W = W.astype(np.float32)
W[0, 0] = W_min
W[1, 0] = W_max
W[:, 1] = W_min + 128
# No input quantization error in bias
b = np.round(np.random.randn(output_channels)).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("FC", ""),
("FC", "DNNLOWP_ACC16"),
("Int8FC", "DNNLOWP_ACC16"),
]
for op_type, engine in op_engine_list:
init_net = core.Net("test_init_net")
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
do_prepack_weight = engine == "DNNLOWP" and prepack_weight
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine="DNNLOWP", device_option=gc
)
net.Proto().op.extend([quantize])
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = dnnlowp_utils.choose_quantization_params(X_min, X_max)
if do_prepack_weight:
inputs = ["W"]
if do_dequantize:
inputs += ["b"]
pack = core.CreateOperator(
"Int8FCPackWeight",
inputs,
["W_packed"],
in_scale=x_q_param.scale,
engine=engine,
)
init_net.Proto().op.extend([pack])
fc = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"W_packed" if do_prepack_weight else "W",
"b",
],
["Y_q" if do_dequantize else "Y"],
dequantize_output=(0 if do_dequantize else 1),
engine=engine,
nbits_in_non_outlier=nbits_in_non_outlier,
device_option=gc,
)
net.Proto().op.extend([fc])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine="DNNLOWP", device_option=gc
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, init_net, net, X, W, b, op_type, engine, None, gc, outputs
)
check_quantized_results_close(outputs)
|
pytorch-master
|
caffe2/quantization/server/fully_connected_dnnlowp_acc16_op_test.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from caffe2.quantization.server import dnnlowp_pybind11
from hypothesis import given, settings
class TestInt8GenQuantParamsMinMaxOperator(hu.HypothesisTestCase):
@settings(max_examples=20, deadline=None)
@given(
n=st.integers(10, 10),
m=st.integers(10, 10),
preserve_sparsity=st.booleans(),
rnd_seed=st.integers(1, 5),
**hu.gcs_cpu_only
)
def test_int8_gen_quant_params_min_max_op(
self, n, m, preserve_sparsity, rnd_seed, gc, dc
):
X_min = 0 if preserve_sparsity else -77
X_max = X_min + 255
np.random.seed(rnd_seed)
X = np.round(np.random.rand(n, m) * (X_max - X_min) + X_min).astype(
np.float32
)
# Calculate X_qparam
hist, bin_edges = np.histogram(X.flatten(), bins=2048)
X_qparam = dnnlowp_pybind11.ChooseStaticQuantizationParams(
np.min(X), np.max(X), hist, preserve_sparsity, 8, "MIN_MAX_QUANTIZATION"
)
# Build a net to generate X's qparam using the Int8GenQuantParamsMinMax op
workspace.FeedBlob("X", X, device_option=gc)
workspace.FeedBlob("X_min", np.array([np.min(X)]), device_option=gc)
workspace.FeedBlob("X_max", np.array([np.max(X)]), device_option=gc)
dnnlowp_pybind11.CreateInt8QuantSchemeBlob(
"quant_scheme", "MIN_MAX_QUANTIZATION", preserve_sparsity
)
assert workspace.HasBlob(
"quant_scheme"
), "Failed to create the quant_scheme blob in current workspace"
gen_quant_params_net = core.Net("gen_quant_params_min_max")
gen_quant_params_op = core.CreateOperator(
"Int8GenQuantParamsMinMax",
["X_min", "X_max", "quant_scheme"],
["quant_param"],
device_option=gc,
)
gen_quant_params_net.Proto().op.extend([gen_quant_params_op])
assert workspace.RunNetOnce(
gen_quant_params_net
), "Failed to run the gen_quant_params net"
scale, zero_point = dnnlowp_pybind11.ObserveInt8QuantParamsBlob("quant_param")
shapes, types = workspace.InferShapesAndTypes(
[gen_quant_params_net],
            blob_dimensions={
                "X": [n, m],
                "X_min": [1],
                "X_max": [1],
                "quant_scheme": [1],
            },
            blob_types={
                "X": core.DataType.FLOAT,
                "X_min": core.DataType.FLOAT,
                "X_max": core.DataType.FLOAT,
                "quant_scheme": core.DataType.STRING,
            },
)
self.assertEqual(shapes["quant_param"], [1])
self.assertEqual(types["quant_param"], core.DataType.FLOAT)
np.testing.assert_equal(scale, X_qparam.scale)
np.testing.assert_equal(zero_point, X_qparam.zero_point)
|
pytorch-master
|
caffe2/quantization/server/int8_gen_quant_params_min_max_test.py
|
import copy
import logging
from collections import defaultdict
import numpy as np
from caffe2.python import core, utils
from caffe2.python.fb import hardcode_scale_zp # type: ignore[import]
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
from itertools import tee
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def blob_uses(net, blob):
u = []
for i, op in enumerate(net.op):
if blob in op.input or blob in op.control_input:
u.append(i)
return u
def fuse_first_bn(net, params, removed_tensors, begin_op_index):
net = copy.deepcopy(net)
params = copy.deepcopy(params)
for i, conv in enumerate(net.op[begin_op_index:], begin_op_index):
if conv.type not in ["Conv", "ConvTranspose"]:
continue
uses = blob_uses(net, conv.output[0])
if len(uses) == 0:
continue
j = uses[0]
bn = net.op[j]
if bn.type != "SpatialBN" or (len(uses) > 1 and conv.output[0] != bn.output[0]):
if bn.type == "SpatialBN":
logger.debug("Can't fuse if more than one user {}".format(uses))
# Can't fuse if more than one user unless SpatialBN is inplace
# An example of inplace SpatialBN where we want to allow multiple uses:
# x = Conv(...)
            # ... // no interfering use or def of x (will be checked below)
# x = SpatialBN(x, ...)
# ...
# z = Foo(..., x, ...)
# ...
# w = Boo(..., x, ...)
# Here, we still want to fuse Conv and SpatialBN
continue
        # There shouldn't be any def of conv.output[0], or any use or def of
        # bn.output[0], between conv and bn.
if any(
blob in net.op[k].input or blob in net.op[k].output
for blob in [conv.output[0], bn.output[0]]
for k in range(i + 1, j)
):
logger.debug(
"Can't fuse because of the following interferring uses or defs:"
)
for k in range(i, j + 1):
logger.debug(net.op[k])
continue
# else, can fuse
fused_conv = copy.deepcopy(conv)
fused_conv.output[0] = bn.output[0]
conv_weight = params[conv.input[1]]
if len(conv.input) > 2:
conv_bias = params[conv.input[2]]
else:
conv_bias = np.zeros(len(params[bn.input[2]])).astype(np.float32)
bn_scale = params[bn.input[1]]
bn_bias = params[bn.input[2]]
bn_running_mean = params[bn.input[3]]
bn_running_var = params[bn.input[4]]
# First, BN computation can be phrased as follows:
# (X - running_mean) * (1.0 / sqrt(running_var + eps)) *
# bn_scale + bias
        # Thus, we can rewrite the BN computation as:
        # X * bn_scale * 1.0 / (sqrt(running_var + eps)) + (bias -
        # running_mean * (1.0 / sqrt(running_var + eps)) * bn_scale)
        # That is, BN is just the affine transform
# X * A + B
# where
# A = bn_scale * 1.0 / (sqrt(running_var + eps))
# B = (bias - running_mean * (1.0 / sqrt(running_var + eps))
# * bn_scale)
eps = 1.0e-5
for arg in bn.arg:
if arg.name == "epsilon":
eps = arg.f
A = bn_scale * 1.0 / (np.sqrt(bn_running_var + eps))
B = bn_bias - bn_running_mean * A
# This identity should hold if we have correctly fused
# np.testing.assert_array_equal(
# params[conv.output[0]] * A + B,
# params[bn.output[0]])
# Now, we have that the computation made is the following:
# ((X `conv` W) + b) * A + B
# Then, we can simply fuse this as follows:
# (X `conv` (W * A)) + b * A + B
# which is simply
# (X `conv` Q) + C
# where
# Q = W * A
# C = b * A + B
        # For ConvTranspose, from the view of convolutions as a
        # Toeplitz multiplication, we have W_ = W^T, so the weights
        # are laid out as (R, S, K, K) (vs (S, R, K, K) for a Conv),
        # so the weights broadcast slightly differently. Remember, our
        # per-channel BN scale A is of size (S,).
A_ = (
A.reshape((-1,) + tuple([1] * (conv_weight.ndim - 1)))
if conv.type == "Conv"
else A.reshape((1, -1) + tuple([1] * (conv_weight.ndim - 2)))
)
C = conv_bias * A + B
Q = conv_weight * A_
assert params[conv.input[1]].shape == Q.shape
if len(conv.input) > 2:
assert params[conv.input[2]].shape == C.shape
else:
assert bn_bias.shape == C.shape
params[conv.input[1]] = Q
if len(conv.input) > 2:
params[conv.input[2]] = C
else:
params[bn.input[2]] = C
fused_conv.input.append(bn.input[2])
new_ops = net.op[:i] + [fused_conv] + net.op[i + 1 : j] + net.op[j + 1 :]
del net.op[:]
removed_tensors.append(bn.input[1])
if len(conv.input) > 2:
removed_tensors.append(bn.input[2])
removed_tensors.append(bn.input[3])
removed_tensors.append(bn.input[4])
del params[bn.input[1]]
if len(conv.input) > 2:
del params[bn.input[2]]
del params[bn.input[3]]
del params[bn.input[4]]
net.op.extend(new_ops)
return net, params, removed_tensors, i + 1
return net, params, removed_tensors, None
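def _fold_bn_params_example(conv_weight, conv_bias, bn_scale, bn_bias,
                            bn_running_mean, bn_running_var, eps=1e-5):
    # Illustrative sketch of the algebra used in fuse_first_bn above (not
    # called by the fusion code, and written for a plain Conv whose weights
    # are laid out as (output_channels, ...)): the folded weight is
    # Q = W * A and the folded bias is C = b * A + B.
    A = bn_scale / np.sqrt(bn_running_var + eps)
    B = bn_bias - bn_running_mean * A
    A_ = A.reshape((-1,) + (1,) * (conv_weight.ndim - 1))
    return conv_weight * A_, conv_bias * A + B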
def fuse_bn(net, params, ignore_failure):
# Run until we hit a fixed point
removed_tensors = []
begin_op_index = 0
while True:
(next_net, next_params, removed_tensors, begin_op_index) = fuse_first_bn(
net, params, removed_tensors, begin_op_index
)
if begin_op_index is None:
if any(op.type == "SpatialBN" for op in next_net.op) and not ignore_failure:
                raise Exception(
                    "Model contains SpatialBN op after fusion: {}".format(next_net)
                )
return (next_net, next_params, removed_tensors)
net, params, removed_tensors = (next_net, next_params, removed_tensors)
def fuse_first_scale(net, params, removed_tensors):
net = copy.deepcopy(net)
params = copy.deepcopy(params)
for ((i, current), (j, next_)) in pairwise(enumerate(net.op)):
if next_.input[0] != current.output[0]:
continue
if (
current.type != "SpatialBN"
or next_.type != "Mul"
or len(net.op) <= j + 1
or net.op[j + 1].type != "Add"
):
continue
# else, can fuse
bn = current
mul = next_
add = net.op[j + 1]
fused_bn = copy.deepcopy(bn)
fused_bn.output[0] = add.output[0]
bn_scale = params[bn.input[1]]
mul_scale = params[mul.input[1]]
bn_bias = params[bn.input[2]]
add_bias = params[add.input[1]]
params[bn.input[1]] = bn_scale * mul_scale
params[bn.input[2]] = mul_scale * bn_bias + add_bias
new_ops = net.op[:i] + [fused_bn] + net.op[j + 2 :]
del net.op[:]
removed_tensors.append(mul.input[1])
removed_tensors.append(add.input[1])
del params[mul.input[1]]
del params[add.input[1]]
net.op.extend(new_ops)
break
return net, params, removed_tensors
def fuse_scale(net, params, ignore_failure):
# Run until we hit a fixed point
removed_tensors = []
while True:
(next_net, next_params, removed_tensors) = fuse_first_scale(
net, params, removed_tensors
)
if len(next_net.op) == len(net.op):
return (next_net, next_params, removed_tensors)
net, params, removed_tensors = (next_net, next_params, removed_tensors)
def fuse_first_relu(net, begin_op_index, ignore_op_with_output=None):
net = copy.deepcopy(net)
for i, conv in enumerate(net.op[begin_op_index:], begin_op_index):
if conv.type not in ["Conv", "ConvTranspose", "Sum", "SpatialBN"]:
continue
uses = blob_uses(net, conv.output[0])
if (
len(uses) == 0
or ignore_op_with_output
and conv.output[0] in ignore_op_with_output
):
continue
j = uses[0]
relu = net.op[j]
if relu.type != "Relu" or len(uses) > 1 and conv.output[0] != relu.output[0]:
# Can't fuse if more than one user unless Relu is inplace
if relu.type == "Relu":
logger.debug("Can't fuse if more than one user {}".format(uses))
continue
        # There shouldn't be any def of conv.output[0], or any use or def of
        # relu.output[0], between conv and relu.
if any(
blob in net.op[k].input or blob in net.op[k].output
for blob in [conv.output[0], relu.output[0]]
for k in range(i + 1, j)
):
logger.debug(
"Can't fuse because of the following interferring uses or defs:"
)
for k in range(i, j + 1):
logger.debug(net.op[k])
continue
# else, can fuse
fused_conv = copy.deepcopy(conv)
fused_conv.type = conv.type + "Relu"
fused_conv.output[0] = relu.output[0]
new_ops = net.op[:i] + [fused_conv] + net.op[i + 1 : j] + net.op[j + 1 :]
del net.op[:]
net.op.extend(new_ops)
return net, i + 1
return net, None
def fuse_relu(net, ignore_failure, ignore_op_with_output=None):
# Run until we hit a fixed point
begin_op_index = 0
while True:
next_net, begin_op_index = fuse_first_relu(
net, begin_op_index, ignore_op_with_output
)
if begin_op_index is None:
if any(op.type == "Relu" for op in next_net.op) and not ignore_failure:
raise Exception("Model contains Relu op after fusion: %s", next_net)
return next_net
net = next_net
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if op.output[0] == blob:
return i
    raise ValueError("Failed to find last producer of blob {}".format(blob))
def swap_first_concat_relu(net, ignore_op_with_output=None):
net = copy.deepcopy(net)
for ((i, current), (j, next_)) in pairwise(enumerate(net.op)):
if next_.input[0] != current.output[0]:
continue
if current.type != "Concat" or next_.type != "Relu":
continue
if ignore_op_with_output and current.output[0] in ignore_op_with_output:
continue
# else, can swap
concat = copy.deepcopy(current)
relu = copy.deepcopy(next_)
pre_ops = copy.deepcopy(net.op[:i])
post_ops = copy.deepcopy(net.op[j + 1 :])
# Delete the Relu after Concat
concat.output[0] = relu.output[0]
# Insert Relu after each op that produces inputs to Concat
for blob in concat.input:
k = last_producer(pre_ops, blob)
producer = pre_ops[k]
assert producer.output[0] == blob
producer.output[0] = blob + "_pre_relu"
new_relu = copy.deepcopy(relu)
new_relu.input[0] = producer.output[0]
new_relu.output[0] = blob
pre_ops = pre_ops[: k + 1] + [new_relu] + pre_ops[k + 1 :]
new_ops = pre_ops + [concat] + post_ops
del net.op[:]
net.op.extend(new_ops)
break
return net
def swap_concat_relu(net, ignore_op_with_output=None):
# Run until we hit a fixed point
while True:
next_net = swap_first_concat_relu(net, ignore_op_with_output)
if len(next_net.op) == len(net.op):
return next_net
net = next_net
def add_version_to_conv_bias(net, init_net):
"""
    In architectures such as FPN (https://arxiv.org/abs/1612.03144), a few
    Conv ops share the same weight and bias and are run at different scales
    of the input. Since 'bias_scale = input_scale * weight_scale', sharing
    the same bias blob among multiple Conv ops means that we would need a
    different bias scale for each of the ops. To achieve this, we just
    duplicate those bias blobs that are used by multiple Conv ops before
    performing the int8 rewrite.
"""
bias_count = defaultdict(int)
for op in net._net.op:
if "Conv" in op.type and len(op.input) >= 3:
bias_count[op.input[2]] += 1
bias_fill_op = {}
for op in init_net._net.op:
if bias_count[op.output[0]] > 1:
bias_fill_op[op.output[0]] = op
bias_version = defaultdict(int)
for op in net._net.op:
if "Conv" in op.type and len(op.input) >= 3:
bias = op.input[2]
if bias_count[bias] <= 1:
continue
version = bias_version[bias]
bias_version[bias] += 1
if version == 0:
continue
new_bias = bias + "_v" + str(version)
fill_op = copy.deepcopy(bias_fill_op[bias])
fill_op.output[0] = new_bias
init_net._net.op.extend([fill_op])
op.input[2] = new_bias
net._net.external_input.append(new_bias)
def add_quantization_param_args_(op, q_param):
op.arg.extend(
[
utils.MakeArgument("Y_scale", q_param.scale),
utils.MakeArgument("Y_zero_point", q_param.zero_point),
]
)
def choose_quantization_params(tensor_min, tensor_max, preserve_sparsity=False):
if tensor_min < 0 and tensor_max > 0 and preserve_sparsity:
symmetric_qmin = -(255 // 2 + 1)
symmetric_qmax = 255 // 2
max_scale = max(
abs(tensor_min / symmetric_qmin), abs(tensor_max / symmetric_qmax)
)
tensor_min = max_scale * symmetric_qmin
tensor_max = max_scale * symmetric_qmax
q_param = hardcode_scale_zp.choose_quantization_params(tensor_min, tensor_max)
if tensor_min < 0 and tensor_max > 0 and preserve_sparsity:
q_param = hardcode_scale_zp.QuantizationParam(q_param.scale, 128)
return q_param
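def _example_symmetric_qparams():
    # Illustrative sketch (assumed range [-2.0, 3.0], not used elsewhere):
    # with preserve_sparsity=True the range is widened symmetrically before
    # choosing parameters, so the returned zero_point is pinned at 128 and
    # float 0.0 stays exactly representable after quantization.
    return choose_quantization_params(-2.0, 3.0, preserve_sparsity=True)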
def add_quantization_param_args(op, tensor, preserve_sparsity=False):
tensor_min = 0 if tensor.size == 0 else tensor.min()
tensor_max = 0 if tensor.size == 0 else tensor.max()
q_param = choose_quantization_params(tensor_min, tensor_max, preserve_sparsity)
add_quantization_param_args_(op, q_param)
return q_param
def create_int8_given_tensor_fill(tensor, out_blob_name, preserve_sparsity=False):
"""
Create Int8GivenTensorFill op that quantizes the given tensor and outputs
an Int8Tensor with out_blob_name.
"""
op = core.CreateOperator("Int8GivenTensorFill", [], out_blob_name)
q_param = add_quantization_param_args(op, tensor, preserve_sparsity)
quantized_tensor = (
np.around(tensor / q_param.scale).astype(np.int32) + q_param.zero_point
)
quantized_tensor = np.maximum(0, np.minimum(quantized_tensor, 255))
op.arg.extend(
[
utils.MakeArgument("values", quantized_tensor.astype(np.uint8).tobytes()),
utils.MakeArgument("shape", quantized_tensor.shape),
]
)
return op, q_param
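def _example_uint8_roundtrip(tensor, q_param):
    # Illustrative sketch (hypothetical helper, not used elsewhere): quantize
    # with the same formula as create_int8_given_tensor_fill and dequantize
    # back; for values inside the chosen range the reconstruction error is at
    # most about half a quantization step. q_param is any object with scale
    # and zero_point attributes.
    q = np.around(tensor / q_param.scale).astype(np.int32) + q_param.zero_point
    q = np.clip(q, 0, 255)
    return q_param.scale * (q - q_param.zero_point)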
def create_int8_bias_tensor_fill(tensor, out_blob_name, x_q_param, w_q_param):
"""
Similar to create_int8_given_tensor_fill, but for bias blobs to be stored
as int32.
"""
scale = x_q_param.scale * w_q_param.scale
quantized_tensor = np.around(tensor / scale).astype(np.int32)
    quantized_tensor = quantized_tensor.reshape(-1)
op = core.CreateOperator("Int8GivenIntTensorFill", [], out_blob_name)
op.arg.extend(
[
utils.MakeArgument("values", quantized_tensor),
utils.MakeArgument("shape", quantized_tensor.shape),
]
)
q_param = hardcode_scale_zp.QuantizationParam(scale, 0)
add_quantization_param_args_(op, q_param)
return op
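def _example_int32_bias_value(bias=1.25, x_scale=0.1, w_scale=0.05):
    # Illustrative sketch (hypothetical values, not used elsewhere): an int32
    # bias blob uses bias_scale = input_scale * weight_scale, so with
    # x_scale=0.1 and w_scale=0.05 a float bias of 1.25 is stored as
    # round(1.25 / 0.005) = 250.
    return int(np.around(bias / (x_scale * w_scale)))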
|
pytorch-master
|
caffe2/quantization/server/utils.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPTanhOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_tanh(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 10 - 5).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [("Tanh", ""), ("Tanh", "DNNLOWP"), ("Int8Tanh", "DNNLOWP")]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if engine == "DNNLOWP":
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
engine=engine,
device_option=gc,
followed_by="Tanh",
)
net.Proto().op.extend([quantize])
tanh = core.CreateOperator(
op_type,
["X_q" if engine == "DNNLOWP" else "X"],
["Y_q" if engine == "DNNLOWP" else "Y"],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([tanh])
if engine == "DNNLOWP":
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
for o in outputs:
np.testing.assert_allclose(o.Y, outputs[0].Y, atol=0.02, rtol=0)
|
pytorch-master
|
caffe2/quantization/server/tanh_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPMulOpTest(hu.HypothesisTestCase):
@given(
N=st.integers(32, 256),
is_empty=st.booleans(),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
in_place=st.sampled_from([(False, False), (True, False), (False, True)]),
**hu.gcs_cpu_only
)
@settings(deadline=None)
def test_dnnlowp_elementwise_mul_int(
self, N, is_empty, in_quantized, out_quantized, in_place, gc, dc
):
if is_empty:
N = 0
# FIXME: DNNLOWP Mul doesn't support inplace operation and
# dequantize_output=1 at the same time
if in_place[0] or in_place[1]:
in_quantized = True
out_quantized = True
# All inputs have scale 1, so exactly represented after quantization
min_ = -100
max_ = min_ + 255
A = np.round(np.random.rand(N) * (max_ - min_) + min_)
A = A.astype(np.float32)
if N != 0:
A[0] = min_
A[1] = max_
B = np.round(np.random.rand(N) * 255 - 128).astype(np.float32)
if N != 0:
B[0] = -128
B[1] = 127
Output = collections.namedtuple("Output", ["Y", "engine"])
outputs = []
engine_list = ["", "DNNLOWP"]
for engine in engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
if do_quantize:
quantize_A = core.CreateOperator(
"Quantize", ["A"], ["A_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_A])
quantize_B = core.CreateOperator(
"Quantize", ["B"], ["B_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_B])
out = "Y"
if in_place[0]:
out = "A"
elif in_place[1]:
out = "B"
mul = core.CreateOperator(
"Mul",
["A_q", "B_q"] if do_quantize else ["A", "B"],
[(out + "_q") if do_dequantize else out],
dequantize_output=not do_dequantize,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([mul])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", [out + "_q"], [out], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("A").feed(A, device_option=gc)
self.ws.create_blob("B").feed(B, device_option=gc)
self.ws.run(net)
outputs.append(Output(Y=self.ws.blobs[out].fetch(), engine=engine))
check_quantized_results_close(outputs)
@given(**hu.gcs_cpu_only)
@settings(deadline=None)
def test_dnnlowp_elementwise_mul_broadcast(self, gc, dc):
# Set broadcast and no axis, i.e. broadcasting last dimensions.
min_ = -100
max_ = min_ + 255
A = np.round(np.random.rand(2, 3, 4, 5) * (max_ - min_) + min_)
A = A.astype(np.float32)
A[0, 0, 0, 0] = min_
A[0, 0, 0, 1] = max_
B = np.round(np.random.rand(4, 5) * 255 - 128).astype(np.float32)
B[0, 0] = -128
B[0, 1] = 127
Output = collections.namedtuple("Output", ["Y", "engine"])
outputs = []
engine_list = ["", "DNNLOWP"]
for engine in engine_list:
net = core.Net("test_net")
mul = core.CreateOperator(
"Mul",
["A", "B"],
["Y"],
engine=engine,
device_option=gc,
broadcast=1,
dequantize_output=1,
)
net.Proto().op.extend([mul])
self.ws.create_blob("A").feed(A, device_option=gc)
self.ws.create_blob("B").feed(B, device_option=gc)
self.ws.run(net)
outputs.append(Output(Y=self.ws.blobs["Y"].fetch(), engine=engine))
check_quantized_results_close(outputs)
@given(**hu.gcs_cpu_only)
@settings(deadline=None)
def test_dnnlowp_elementwise_mul_broadcast_axis(self, gc, dc):
for bdim, axis in [
((3, 4), 1), # broadcasting intermediate dimensions
((2,), 0), # broadcasting the first dimension
            # broadcasting with single elem dimensions at both ends
            ((1, 4, 1), 1),
        ]:
min_ = -100
max_ = min_ + 255
A = np.round(np.random.rand(2, 3, 4, 5) * (max_ - min_) + min_)
A = A.astype(np.float32)
B = np.round(np.random.rand(*bdim) * 255 - 128).astype(np.float32)
A.flat[0] = min_
A.flat[1] = max_
B.flat[0] = -128
B.flat[1] = 127
Output = collections.namedtuple("Output", ["Y", "engine"])
outputs = []
engine_list = ["", "DNNLOWP"]
for engine in engine_list:
net = core.Net("test_net")
mul = core.CreateOperator(
"Mul",
["A", "B"],
["Y"],
engine=engine,
device_option=gc,
broadcast=1,
axis=axis,
dequantize_output=1,
)
net.Proto().op.extend([mul])
self.ws.create_blob("A").feed(A, device_option=gc)
self.ws.create_blob("B").feed(B, device_option=gc)
self.ws.run(net)
outputs.append(Output(Y=self.ws.blobs["Y"].fetch(), engine=engine))
check_quantized_results_close(outputs)
|
pytorch-master
|
caffe2/quantization/server/elementwise_mul_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close,
generate_conv_inputs,
generate_convnd_inputs,
run_conv_or_fc,
)
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpConvDepthWiseTest(hu.HypothesisTestCase):
@given(
stride=st.integers(1, 2),
size=st.integers(10, 16),
# depthwise 3x3 fast path only works for a multiple of 8
group=st.sampled_from([8, 24, 32]),
batch_size=st.integers(0, 3),
prepack_weight=st.booleans(),
share_col_buffer=st.booleans(),
preserve_activation_sparsity=st.booleans(),
preserve_weight_sparsity=st.booleans(),
quantize_groupwise=st.booleans(),
relu=st.booleans(),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_depthwise_3x3_conv(
self,
stride,
size,
group,
batch_size,
prepack_weight,
share_col_buffer,
preserve_activation_sparsity,
preserve_weight_sparsity,
quantize_groupwise,
relu,
gc,
dc,
):
pad = 1
kernel = 3
dilation = 1
input_channels_per_group = 1
output_channels_per_group = 1
order = "NHWC"
X, W, b = generate_conv_inputs(
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization=quantize_groupwise,
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
if relu:
op_engine_list = [
("Conv", ""),
("ConvRelu", "DNNLOWP"),
("Int8ConvRelu", "DNNLOWP"),
]
else:
op_engine_list = [
("Conv", ""),
("Conv", "DNNLOWP"),
("Int8Conv", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
init_net = core.Net("test_init_net")
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine
do_dequantize = "DNNLOWP" in engine
do_prepack_weight = engine == "DNNLOWP" and prepack_weight
if do_quantize:
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
preserve_activation_sparsity=preserve_activation_sparsity,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([quantize])
if do_prepack_weight:
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = dnnlowp_utils.choose_quantization_params(
X_min, X_max, preserve_activation_sparsity
)
inputs = ["W"]
if do_dequantize:
inputs += ["b"]
pack = core.CreateOperator(
"Int8ConvPackWeight",
inputs,
["W_packed"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
quantize_groupwise=quantize_groupwise,
in_scale=x_q_param.scale,
)
init_net.Proto().op.extend([pack])
conv = core.CreateOperator(
op_type,
["X_q" if do_quantize else "X", "W", "b"],
["Y_q" if do_dequantize else "Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
shared_buffer=(1 if share_col_buffer else 0),
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
quantize_groupwise=quantize_groupwise,
device_option=gc,
)
if do_dequantize or do_prepack_weight:
dnnlowp_utils.add_quantization_param_args(
conv, outputs[0][0], preserve_activation_sparsity
)
net.Proto().op.extend([conv])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
elif relu:
relu_op = core.CreateOperator(
"Relu", ["Y"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([relu_op])
run_conv_or_fc(
self, init_net, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs, symmetric=preserve_activation_sparsity)
@given(
stride_0=st.integers(1, 2),
stride_1=st.integers(1, 2),
stride_2=st.integers(1, 2),
size=st.integers(5, 12),
# depthwise 3x3x3 fast path only works for a multiple of 8
group=st.sampled_from([8, 24, 32]),
batch_size=st.integers(0, 2),
prepack_weight=st.booleans(),
fuse_relu=st.booleans(),
share_col_buffer=st.booleans(),
preserve_activation_sparsity=st.booleans(),
preserve_weight_sparsity=st.booleans(),
quantize_groupwise=st.just(True),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_depthwise_3x3x3_conv(
self,
stride_0,
stride_1,
stride_2,
size,
group,
batch_size,
prepack_weight,
fuse_relu,
share_col_buffer,
preserve_activation_sparsity,
preserve_weight_sparsity,
quantize_groupwise,
gc,
dc,
):
pad = 1
kernel = 3
dilation = 1
input_channels_per_group = 1
output_channels_per_group = 1
order = "NHWC"
X, W, b = generate_convnd_inputs(
(stride_0, stride_1, stride_2),
(pad,) * 3,
(kernel,) * 3,
(dilation,) * 3,
(size,) * 3,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization=quantize_groupwise,
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op = "ConvRelu" if fuse_relu else "Conv"
op_engine_list = [(op, ""), (op, "DNNLOWP"), ("Int8" + op, "DNNLOWP")]
for op_type, engine in op_engine_list:
init_net = core.Net("test_init_net")
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine
do_dequantize = "DNNLOWP" in engine
do_prepack_weight = engine == "DNNLOWP" and prepack_weight
if do_quantize:
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
preserve_activation_sparsity=preserve_activation_sparsity,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([quantize])
if do_prepack_weight:
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = dnnlowp_utils.choose_quantization_params(
X_min, X_max, preserve_activation_sparsity
)
inputs = ["W"]
if do_dequantize:
inputs += ["b"]
pack = core.CreateOperator(
"Int8ConvPackWeight",
inputs,
["W_packed"],
strides=[stride_0, stride_1, stride_2],
kernels=[kernel] * 3,
dilations=[dilation] * 3,
pads=[pad] * (3 * 2),
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
quantize_groupwise=quantize_groupwise,
in_scale=x_q_param.scale,
)
init_net.Proto().op.extend([pack])
conv = core.CreateOperator(
op_type,
["X_q" if do_quantize else "X", "W", "b"],
["Y_q" if do_dequantize else "Y"],
strides=[stride_0, stride_1, stride_2],
kernels=[kernel] * 3,
dilations=[dilation] * 3,
pads=[pad] * (3 * 2),
order=order,
shared_buffer=(1 if share_col_buffer else 0),
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
quantize_groupwise=quantize_groupwise,
device_option=gc,
)
if do_dequantize or do_prepack_weight:
dnnlowp_utils.add_quantization_param_args(
conv, outputs[0][0], preserve_activation_sparsity
)
net.Proto().op.extend([conv])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, init_net, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs, symmetric=preserve_activation_sparsity)
|
pytorch-master
|
caffe2/quantization/server/conv_depthwise_dnnlowp_op_test.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
from caffe2.quantization.server import dnnlowp_pybind11
class TestInt8QuantSchemeBlobFillOperator(hu.HypothesisTestCase):
@given(
**hu.gcs_cpu_only
)
def test_int8_quant_scheme_blob_fill_op(
self, gc, dc
):
# Build a net to generate qscheme blob using the Int8QuantSchemeBlobFill op
gen_quant_scheme_net = core.Net("gen_quant_scheme")
gen_quant_scheme_op = core.CreateOperator(
"Int8QuantSchemeBlobFill",
[],
["quant_scheme"],
quantization_kind="MIN_MAX_QUANTIZATION",
preserve_sparsity=False,
device_option=gc,
)
gen_quant_scheme_net.Proto().op.extend([gen_quant_scheme_op])
assert workspace.RunNetOnce(
gen_quant_scheme_net
), "Failed to run the gen_quant_scheme net"
quantization_kind, preserve_sparsity = dnnlowp_pybind11.ObserveInt8QuantSchemeBlob("quant_scheme")
assert quantization_kind == "MIN_MAX_QUANTIZATION"
assert not preserve_sparsity
|
pytorch-master
|
caffe2/quantization/server/int8_quant_scheme_blob_fill_test.py
|
import numpy as np
from caffe2.python import core, workspace
from caffe2.quantization.server import dnnlowp_pybind11 # type: ignore[attr-defined]
net = core.Net("test_net")
X = np.array([[1, 2], [3, 4]]).astype(np.float32)
W = np.array([[5, 6], [7, 8]]).astype(np.float32)
b = np.array([0, 1]).astype(np.float32)
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
Y = net.FC(["X", "W", "b"], ["Y"])
dnnlowp_pybind11.ObserveMinMaxOfOutput("test_net.minmax", 1)
workspace.CreateNet(net)
workspace.RunNet(net)
print(workspace.FetchBlob("Y"))
workspace.ResetWorkspace()
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
dnnlowp_pybind11.ObserveHistogramOfOutput("test_net.hist", 1)
workspace.CreateNet(net)
workspace.RunNet(net)
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
dnnlowp_pybind11.AddOutputColumnMaxHistogramObserver(
net._net.name, "test_net._col_max_hist", ["Y"]
)
workspace.RunNet(net)
|
pytorch-master
|
caffe2/quantization/server/observer_test.py
|
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import dnnlowp_pybind11
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPQuantizeOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048),
is_empty=st.booleans(),
absorb=st.booleans(),
**hu.gcs_cpu_only)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_quantize(self, size, is_empty, absorb, gc, dc):
if is_empty:
size = 0
min_ = -10.0
max_ = 20.0
X = (np.random.rand(size) * (max_ - min_) + min_).astype(np.float32)
X_min = 0 if X.size == 0 else X.min()
X_max = 1 if X.size == 0 else X.max()
X_scale = (max(X_max, 0) - min(X_min, 0)) / 255
X_zero = np.round(-X_min / X_scale)
op_type_list = ["Quantize", "Int8Quantize"]
engine = "DNNLOWP"
for op_type in op_type_list:
net = core.Net("test_net")
quantize = core.CreateOperator(
op_type, ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
dnnlowp_pybind11.CreateInt8QuantParamsBlob(
"quant_param", float(X_scale), int(X_zero)
)
quantize_2 = core.CreateOperator(
op_type,
["X", "quant_param"],
["X_q_2"],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([quantize_2])
if absorb:
net_str = dnnlowp_pybind11.freeze_quantization_params(
net.Proto().SerializeToString())
net.Proto().ParseFromString(net_str)
workspace.FeedBlob("X", X, device_option=gc)
workspace.RunNetOnce(net)
X_q = workspace.FetchInt8Blob("X_q")[0]
X_q_2 = workspace.FetchInt8Blob("X_q_2")[0]
# Dequantize results and measure quantization error against inputs
X_dq = X_scale * (X_q - X_zero)
X_dq_2 = X_scale * (X_q_2 - X_zero)
            # The maximum rounding error is X_scale / 2 in exact arithmetic;
            # use X_scale / 1.9 here to allow for finite floating-point precision.
atol = X_scale / 1.9
np.testing.assert_allclose(X_dq, X, atol=atol, rtol=0)
np.testing.assert_allclose(X_dq_2, X, atol=atol, rtol=0)
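# Illustrative sketch (not part of the original test): how a min/max scale and
# zero point like the reference values above can be derived for unsigned 8-bit
# quantization. The helper name is hypothetical, not a dnnlowp API.
def _minmax_qparams_sketch(x_min, x_max, qmax=255):
    # Include 0 in the range so exact zeros remain exactly representable.
    x_min = min(float(x_min), 0.0)
    x_max = max(float(x_max), 0.0)
    scale = (x_max - x_min) / qmax
    if scale == 0.0:
        return 1.0, 0
    zero_point = int(round(-x_min / scale))
    return scale, zero_point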
|
pytorch-master
|
caffe2/quantization/server/quantize_dnnlowp_op_test.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from caffe2.quantization.server import dnnlowp_pybind11
from hypothesis import given, settings
class TestInt8GenQuantParamsOperator(hu.HypothesisTestCase):
@settings(max_examples=20, deadline=None)
@given(
n=st.integers(10, 100),
m=st.integers(1, 128),
k=st.integers(64, 1024),
quantization_kind=st.sampled_from(
[
"MIN_MAX_QUANTIZATION",
"L2_MIN_QUANTIZATION_APPROX",
"L2_MIN_QUANTIZATION",
"P99_QUANTIZATION"
]
),
preserve_sparsity=st.booleans(),
rnd_seed=st.integers(1, 5),
**hu.gcs_cpu_only
)
def test_int8_gen_quant_params_op(
self, n, m, k, quantization_kind, preserve_sparsity, rnd_seed, gc, dc
):
assert n > 0, "Zero samples in the input data"
X_min = 0 if preserve_sparsity else -77
X_max = X_min + 255
np.random.seed(rnd_seed)
X = np.round(np.random.rand(n, m, k) * (X_max - X_min) + X_min).astype(
np.float32
)
# Calculate X_qparam
hist, bin_edges = np.histogram(X.flatten(), bins=2048)
X_qparam = dnnlowp_pybind11.ChooseStaticQuantizationParams(
np.min(X), np.max(X), hist, preserve_sparsity, 8, quantization_kind
)
# Build a net to generate X's qparam using the Int8GenQuantParams op
workspace.FeedBlob("X", X, device_option=gc)
dnnlowp_pybind11.CreateInt8QuantSchemeBlob(
"quant_scheme", quantization_kind, preserve_sparsity
)
assert workspace.HasBlob(
"quant_scheme"
), "Failed to create the quant_scheme blob in current workspace"
gen_quant_params_net = core.Net("gen_quant_params")
gen_quant_params_op = core.CreateOperator(
"Int8GenQuantParams",
["X", "quant_scheme"],
["quant_param"],
device_option=gc,
)
gen_quant_params_net.Proto().op.extend([gen_quant_params_op])
assert workspace.RunNetOnce(
gen_quant_params_net
), "Failed to run the gen_quant_params net"
scale, zero_point = dnnlowp_pybind11.ObserveInt8QuantParamsBlob("quant_param")
shapes, types = workspace.InferShapesAndTypes(
[gen_quant_params_net],
blob_dimensions={"X": [n, m, k], "quant_scheme": [1]},
blob_types={"X": core.DataType.FLOAT, "quant_scheme": core.DataType.STRING}
)
self.assertEqual(shapes["quant_param"], [1])
self.assertEqual(types["quant_param"], core.DataType.FLOAT)
np.testing.assert_equal(scale, X_qparam.scale)
np.testing.assert_equal(zero_point, X_qparam.zero_point)
|
pytorch-master
|
caffe2/quantization/server/int8_gen_quant_params_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
avoid_vpmaddubsw_overflow_fc,
check_quantized_results_close,
run_conv_or_fc,
)
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class RowWiseDNNLowPFullyConnectedOpTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
input_channels=st.sampled_from([3, 4, 5, 8, 16, 32]),
output_channels=st.integers(2, 16),
batch_size=st.integers(0, 16),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
prepack_weight=st.booleans(),
**hu.gcs_cpu_only
)
def test_rowwise_dnnlowp_fully_connected_int(
self,
input_channels,
output_channels,
batch_size,
in_quantized,
out_quantized,
prepack_weight,
gc,
dc,
):
# X has scale 1, so exactly represented after quantization
X_min = -77
X_max = X_min + 255
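        # (Illustrative note, not in the original source) with a span of
        # X_max - X_min = 255 mapped onto 255 quantization steps, the step size
        # is exactly 1.0, so the rounded integer values below incur no
        # quantization error.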
X = np.round(
np.random.rand(batch_size, input_channels) * (X_max - X_min) + X_min
)
X = X.astype(np.float32)
# input channels 0 and 1 are all X_min to avoid overflow from vpmaddubsw
# when multiplied with W_min and W_max
X[:, 0:2] = X_min
if batch_size != 0:
X[0, 2] = X_max
# Each row of W has scale 1 but with different offset, so row-wise
# quantization shouldn't have any input quantization error.
W = np.zeros((output_channels, input_channels))
W = W.astype(np.float32)
for i in range(output_channels):
W_min = -100 + i
W_max = W_min + 255
W[i, :] = np.round(np.random.rand(input_channels) * (W_max - W_min) + W_min)
W[i, 0] = W_min
W[i, 1] = W_max
            # Make sure we won't have overflows from the vpmaddubsw instruction
            # used in fbgemm
avoid_vpmaddubsw_overflow_fc(
batch_size,
input_channels,
1,
X,
X_min,
X_max,
W[i : i + 1,],
W_min,
W_max,
)
if i % 2 == 0:
W[i, :] = (W[i, :] - W_min) * 2 + W_min
b = np.random.randn(output_channels).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("FC", ""),
("FC", "DNNLOWP_ROWWISE"),
("FC", "DNNLOWP_ROWWISE_16"),
("Int8FC", "DNNLOWP_ROWWISE"),
]
for op_type, engine in op_engine_list:
init_net = core.Net("test_init_net")
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
do_prepack_weight = engine == "DNNLOWP_ROWWISE" and prepack_weight
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = dnnlowp_utils.choose_quantization_params(X_min, X_max)
if do_prepack_weight:
inputs = ["W"]
if do_dequantize:
inputs += ["b"]
pack = core.CreateOperator(
"Int8FCPackWeight",
inputs,
["W_packed"],
in_scale=x_q_param.scale,
engine=engine,
)
init_net.Proto().op.extend([pack])
fc = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"W_packed" if do_prepack_weight else "W",
"b",
],
["Y_q" if do_dequantize else "Y"],
dequantize_output=not do_dequantize,
engine=engine,
device_option=gc,
)
if do_prepack_weight:
# When pre-packed quantized weight is provided, we can't rescale
# the output dynamically by looking at the range of output of
# each batch, so here we provide the range of output observed
# from fp32 reference implementation
dnnlowp_utils.add_quantization_param_args(fc, outputs[0][0])
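                # (Illustrative note, not in the original source) this call
                # presumably attaches Y_scale / Y_zero_point arguments derived
                # from the observed fp32 output range, making the output
                # quantization static.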
net.Proto().op.extend([fc])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, init_net, net, X, W, b, op_type, engine, None, gc, outputs
)
check_quantized_results_close(outputs)
|
pytorch-master
|
caffe2/quantization/server/fully_connected_rowwise_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPOpSpatialBNTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
size=st.integers(10, 16),
input_channels=st.integers(2, 16),
output_channels=st.integers(2, 16),
batch_size=st.integers(0, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
fuse_relu=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_spatial_bn_int(
self,
size,
input_channels,
output_channels,
batch_size,
order,
in_quantized,
out_quantized,
fuse_relu,
gc,
dc,
):
X_min = -77
X_max = X_min + 255
X = np.round(np.random.rand(batch_size, size, size, input_channels)).astype(
np.float32
)
if batch_size != 0:
X[0, 0, 0, 0] = X_min
X[0, 0, 0, 1] = X_max
epsilon = np.abs(np.random.rand())
scale = np.random.rand(input_channels).astype(np.float32)
bias = np.random.rand(input_channels).astype(np.float32)
mean = np.random.rand(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32)
if order == "NCHW":
X = utils.NHWC2NCHW(X)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("SpatialBN", ""),
]
if fuse_relu:
op_engine_list += [
("Int8SpatialBNRelu", "DNNLOWP"),
]
else:
op_engine_list += [
("SpatialBN", "DNNLOWP"),
("Int8SpatialBN", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine
)
net.Proto().op.extend([quantize])
bn = core.CreateOperator(
op_type,
["X_q" if do_quantize else "X", "scale", "bias", "mean", "var"],
["Y_q" if do_dequantize else "Y"],
is_test=True,
epsilon=epsilon,
order=order,
engine=engine,
dequantize_output=not do_dequantize,
)
net.Proto().op.extend([bn])
if "DNNLOWP" in engine:
dnnlowp_utils.add_quantization_param_args(bn, outputs[0][0])
if fuse_relu and "DNNLOWP" not in engine:
net.Relu(["Y"], "Y")
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.create_blob("scale").feed(scale, device_option=gc)
self.ws.create_blob("bias").feed(bias, device_option=gc)
self.ws.create_blob("mean").feed(mean, device_option=gc)
self.ws.create_blob("var").feed(var, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
|
pytorch-master
|
caffe2/quantization/server/spatial_batch_norm_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPConcatOpTest(hu.HypothesisTestCase):
@given(
dim1=st.integers(0, 256),
dim2=st.integers(0, 256),
axis=st.integers(0, 1),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_concat_int(
self, dim1, dim2, axis, in_quantized, out_quantized, gc, dc
):
# X has scale 1, so exactly represented after quantization
min_ = -100
max_ = min_ + 255
X = np.round(np.random.rand(dim1, dim2) * (max_ - min_) + min_)
X = X.astype(np.float32)
if dim1 >= 1 and dim2 >= 2:
X[0, 0] = min_
X[0, 1] = max_
elif dim2 == 1:
return
# Y has scale 1/2, so exactly represented after quantization
Y = np.round(np.random.rand(dim1, dim2) * 255 / 2 - 64)
Y = Y.astype(np.float32)
if dim1 >= 1 and dim2 >= 2:
Y[0, 0] = -64
Y[0, 1] = 127.0 / 2
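        # (Illustrative note, not in the original source) the span is
        # 63.5 - (-64) = 127.5 over 255 steps, i.e. a step of 0.5, and every
        # entry above is a multiple of 0.5, hence exactly representable.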
Output = collections.namedtuple("Output", ["Z", "op_type", "engine"])
outputs = []
op_engine_list = [
("Concat", ""),
("Concat", "DNNLOWP"),
("Int8Concat", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
if do_quantize:
quantize_x = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
quantize_y = core.CreateOperator(
"Quantize", ["Y"], ["Y_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_x, quantize_y])
concat = core.CreateOperator(
op_type,
["X_q", "Y_q"] if do_quantize else ["X", "Y"],
["Z_q" if do_dequantize else "Z", "split"],
dequantize_output=not do_dequantize,
engine=engine,
device_option=gc,
axis=axis,
)
net.Proto().op.extend([concat])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Z_q"], ["Z"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.create_blob("Y").feed(Y, device_option=gc)
self.ws.create_blob("split")
self.ws.run(net)
outputs.append(
Output(Z=self.ws.blobs["Z"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
|
pytorch-master
|
caffe2/quantization/server/concat_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close,
run_conv_or_fc
)
from hypothesis import assume, given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(
[
"caffe2",
"--caffe2_omp_num_threads=11",
# Increase this threshold to test acc16 with randomly generated data
"--caffe2_dnnlowp_acc16_density_threshold=0.5",
]
)
class DNNLowPOpConvAcc16OpTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
stride=st.integers(1, 2),
pad=st.integers(0, 2),
kernel=st.integers(1, 5),
dilation=st.integers(1, 2),
size=st.integers(10, 16),
group=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 3, 4, 5, 8, 16, 32]),
output_channels_per_group=st.integers(2, 16),
batch_size=st.integers(0, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
weight_quantized=st.booleans(),
share_col_buffer=st.booleans(),
preserve_activation_sparsity=st.booleans(),
preserve_weight_sparsity=st.booleans(),
**hu.gcs_cpu_only
)
@settings(deadline=10000)
def test_dnnlowp_conv_acc16_int(
self,
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
weight_quantized,
share_col_buffer,
preserve_activation_sparsity,
preserve_weight_sparsity,
gc,
dc,
):
assume(group == 1 or dilation == 1)
assume(size >= dilation * (kernel - 1) + 1)
input_channels = input_channels_per_group * group
output_channels = output_channels_per_group * group
# X and W have scale 1, so exactly represented after quantization
# This was made sure by having at least one 0 and one 255 for unsigned
# 8-bit tensors, and at least one -128 and one 127 for signed 8-bit
# tensors.
        # Since fbgemm_acc16 accumulates into 16 bits, this test uses small
        # numbers to avoid overflow, except for the 0, 255, -128, and 127 values.
        # We also make sure 255, -128, and 127 are never multiplied together by
        # putting them in different input channels and zeroing the corresponding
        # input channel in the other matrix.
# For example, we put 255 in input channel 1 in X, so we make the
# corresponding input channel in W all zeros.
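        # Worked example of the headroom concern (illustrative, not from the
        # original source): int16 holds at most 32767, and a single product
        # 255 * 127 = 32385 nearly saturates it, so accumulating even two such
        # products would overflow a 16-bit accumulator.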
X_min = 0 if preserve_activation_sparsity else -77
X_max = X_min + 255
X = np.random.rand(batch_size, size, size, input_channels) * 4 + X_min
X = np.round(X).astype(np.float32)
X[..., 0] = X_min
if batch_size != 0:
X[0, 0, 0, 1] = X_max
if preserve_weight_sparsity:
W_min = -128
W_max = 100
else:
W_min = -100
W_max = W_min + 255
W = (
np.random.rand(output_channels, kernel, kernel, input_channels_per_group)
* 4
- 2
+ W_min
+ 128
)
W = np.round(W).astype(np.float32)
W[0, 0, 0, 0] = W_min
W[1, 0, 0, 0] = W_max
W[..., 1] = W_min + 128 # "zeros"
if order == "NCHW":
X = utils.NHWC2NCHW(X)
W = utils.NHWC2NCHW(W)
# No input quantization error in bias
b = np.round(np.random.randn(output_channels)).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op_engine_list = [
("Conv", ""),
("Conv", "DNNLOWP_ACC16"),
("Int8Conv", "DNNLOWP_ACC16"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine
do_dequantize = "DNNLOWP" in engine
do_quantize_weight = (
"DNNLOWP" in engine and weight_quantized and len(outputs) > 0
)
if do_quantize:
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
preserve_activation_sparsity=preserve_activation_sparsity,
engine="DNNLOWP",
device_option=gc,
)
net.Proto().op.extend([quantize])
if do_quantize_weight:
int8_given_tensor_fill, w_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
W, "W_q", preserve_weight_sparsity
)
net.Proto().op.extend([int8_given_tensor_fill])
# Bias
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = dnnlowp_utils.choose_quantization_params(
X_min, X_max, preserve_activation_sparsity
)
int8_bias_tensor_fill = dnnlowp_utils.create_int8_bias_tensor_fill(
b, "b_q", x_q_param, w_q_param
)
net.Proto().op.extend([int8_bias_tensor_fill])
conv = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"W_q" if do_quantize_weight else "W",
"b_q" if do_quantize_weight else "b",
],
["Y_q" if do_dequantize else "Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
shared_buffer=(1 if share_col_buffer else 0),
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
device_option=gc,
)
if do_dequantize or do_quantize_weight:
# When quantized weight is provided, we can't rescale the
# output dynamically by looking at the range of output of each
# batch, so here we provide the range of output observed from
# fp32 reference implementation
dnnlowp_utils.add_quantization_param_args(
conv, outputs[0][0], preserve_activation_sparsity
)
net.Proto().op.extend([conv])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine="DNNLOWP", device_option=gc
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, None, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs, symmetric=preserve_activation_sparsity)
@given(
stride=st.integers(1, 2),
pad=st.integers(0, 2),
kernel=st.integers(1, 5),
dilation=st.integers(1, 2),
size=st.integers(10, 16),
group=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 3, 4, 5, 8, 16, 32]),
output_channels_per_group=st.integers(2, 16),
batch_size=st.integers(0, 3),
order=st.sampled_from(["NHWC"]),
weight_quantized=st.booleans(),
prepack_weight=st.booleans(),
nbits_in_non_outlier=st.sampled_from((0, 1, 6, 8)),
share_col_buffer=st.booleans(),
preserve_activation_sparsity=st.booleans(),
preserve_weight_sparsity=st.booleans(),
**hu.gcs_cpu_only
)
@settings(deadline=10000)
def test_dnnlowp_conv_acc16_outlier(
self,
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
weight_quantized,
prepack_weight,
nbits_in_non_outlier,
share_col_buffer,
preserve_activation_sparsity,
preserve_weight_sparsity,
gc,
dc,
):
assume(group == 1 or dilation == 1)
assume(size >= dilation * (kernel - 1) + 1)
input_channels = input_channels_per_group * group
output_channels = output_channels_per_group * group
X_min = 0 if preserve_activation_sparsity else -77
X_max = X_min + 255
X = np.random.rand(batch_size, size, size, input_channels) * 4 + X_min
X = np.round(X).astype(np.float32)
X[..., 0] = X_min
if batch_size != 0:
X[0, 0, 0, 1] = X_max
if preserve_weight_sparsity:
W_min = -128
W_max = 100
else:
W_min = -100
W_max = W_min + 255
W = (
np.random.rand(output_channels, kernel, kernel, input_channels_per_group)
* 4
- 2
+ W_min
+ 128
)
W = np.round(W).astype(np.float32)
W[0, 0, 0, 0] = W_min
W[1, 0, 0, 0] = W_max
W[..., 1] = W_min + 128 # "zeros"
if order == "NCHW":
X = utils.NHWC2NCHW(X)
W = utils.NHWC2NCHW(W)
b = np.round(np.random.randn(output_channels)).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op_engine_list = [
("Conv", ""),
("Conv", "DNNLOWP_ACC16"),
("Int8Conv", "DNNLOWP_ACC16"),
]
for op_type, engine in op_engine_list:
init_net = core.Net("test_init_net")
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine
do_dequantize = "DNNLOWP" in engine
do_quantize_weight = "DNNLOWP" in engine and weight_quantized
do_prepack_weight = "DNNLOWP" in engine and prepack_weight
if do_quantize:
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
preserve_activation_sparsity=preserve_activation_sparsity,
engine="DNNLOWP",
device_option=gc,
)
net.Proto().op.extend([quantize])
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = dnnlowp_utils.choose_quantization_params(
X_min, X_max, preserve_activation_sparsity
)
if do_quantize_weight:
int8_given_tensor_fill, w_q_param = dnnlowp_utils.create_int8_given_tensor_fill(
W, "W_q", preserve_weight_sparsity
)
init_net.Proto().op.extend([int8_given_tensor_fill])
# Bias
int8_bias_tensor_fill = dnnlowp_utils.create_int8_bias_tensor_fill(
b, "b_q", x_q_param, w_q_param
)
init_net.Proto().op.extend([int8_bias_tensor_fill])
if do_prepack_weight:
inputs = ["W_q" if do_quantize_weight else "W"]
if do_dequantize:
inputs += ["b_q" if do_quantize_weight else "b"]
pack = core.CreateOperator(
"Int8ConvPackWeight",
inputs,
["W_packed"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
nbits_in_non_outlier=nbits_in_non_outlier,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
in_scale=x_q_param.scale,
)
init_net.Proto().op.extend([pack])
conv = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"W_packed"
if do_prepack_weight
else ("W_q" if do_quantize_weight else "W"),
"b_q" if do_quantize_weight else "b",
],
["Y_q" if do_dequantize else "Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
nbits_in_non_outlier=nbits_in_non_outlier,
shared_buffer=(1 if share_col_buffer else 0),
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
device_option=gc,
)
if do_dequantize or do_quantize_weight or do_prepack_weight:
# When quantized weight is provided, we can't rescale the
# output dynamically by looking at the range of output of each
# batch, so here we provide the range of output observed from
# fp32 reference implementation
dnnlowp_utils.add_quantization_param_args(
conv, outputs[0][0], preserve_activation_sparsity
)
net.Proto().op.extend([conv])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine="DNNLOWP", device_option=gc
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, init_net, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs, symmetric=preserve_activation_sparsity)
|
pytorch-master
|
caffe2/quantization/server/conv_dnnlowp_acc16_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPLSTMUnitOpTest(hu.HypothesisTestCase):
@given(
N=st.integers(0, 64),
D=st.integers(4, 64),
forget_bias=st.integers(0, 4),
**hu.gcs_cpu_only
)
@settings(max_examples=10, deadline=None)
def test_dnnlowp_lstm_unit(self, N, D, forget_bias, gc, dc):
# X has scale 1, so exactly represented after quantization
H_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
C_in = np.clip(np.random.randn(1, N, D), -1, 1).astype(np.float32)
G = np.clip(np.random.randn(1, N, 4 * D), -1, 1).astype(np.float32)
seq_lengths = np.round(np.random.rand(N)).astype(np.int32)
# seq_lengths.fill(0)
t = np.array([5]).astype(np.int32)
Output = collections.namedtuple("Output", ["H_out", "C_out", "engine"])
outputs = []
engine_list = ["", "DNNLOWP"]
for engine in engine_list:
net = core.Net("test_net")
if engine == "DNNLOWP":
quantize_H_in = core.CreateOperator(
"Quantize", ["H_in"], ["H_in_q"], engine=engine, device_option=gc
)
quantize_C_in = core.CreateOperator(
"Quantize", ["C_in"], ["C_in_q"], engine=engine, device_option=gc
)
quantize_G = core.CreateOperator(
"Quantize", ["G"], ["G_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize_H_in, quantize_C_in, quantize_G])
lstm = core.CreateOperator(
"LSTMUnit",
[
"H_in_q" if engine == "DNNLOWP" else "H_in",
"C_in_q" if engine == "DNNLOWP" else "C_in",
"G_q" if engine == "DNNLOWP" else "G",
"seq_lengths",
"t",
],
[
"H_out_q" if engine == "DNNLOWP" else "H_out",
"C_out_q" if engine == "DNNLOWP" else "C_out",
],
engine=engine,
device_option=gc,
axis=0,
)
net.Proto().op.extend([lstm])
if engine == "DNNLOWP":
dequantize_H_out = core.CreateOperator(
"Dequantize",
["H_out_q"],
["H_out"],
engine=engine,
device_option=gc,
)
dequantize_C_out = core.CreateOperator(
"Dequantize",
["C_out_q"],
["C_out"],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([dequantize_H_out, dequantize_C_out])
self.ws.create_blob("H_in").feed(H_in, device_option=gc)
self.ws.create_blob("C_in").feed(C_in, device_option=gc)
self.ws.create_blob("G").feed(G, device_option=gc)
self.ws.create_blob("seq_lengths").feed(seq_lengths, device_option=gc)
self.ws.create_blob("t").feed(t, device_option=gc)
self.ws.run(net)
outputs.append(
Output(
H_out=self.ws.blobs["H_out"].fetch(),
C_out=self.ws.blobs["C_out"].fetch(),
engine=engine,
)
)
for o in outputs:
np.testing.assert_allclose(o.C_out, outputs[0].C_out, atol=0.1, rtol=0.2)
np.testing.assert_allclose(o.H_out, outputs[0].H_out, atol=0.1, rtol=0.2)
|
pytorch-master
|
caffe2/quantization/server/lstm_unit_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, utils, workspace
from caffe2.quantization.server import utils as dnnlowp_utils
from caffe2.quantization.server.dnnlowp_test_utils import (
check_quantized_results_close,
run_conv_or_fc
)
from hypothesis import assume, given, settings
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(
[
"caffe2",
"--caffe2_omp_num_threads=11",
# Increase this threshold to test acc16 with randomly generated data
"--caffe2_dnnlowp_acc16_density_threshold=0.5",
]
)
class GroupWiseDNNLowPOpConvAcc16OpTest(hu.HypothesisTestCase):
# correctness test with no quantization error in inputs
@given(
stride=st.integers(1, 2),
pad=st.integers(0, 2),
kernel=st.integers(1, 5),
dilation=st.integers(1, 2),
size=st.integers(10, 16),
group=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 3, 4, 5, 8, 16, 32]),
output_channels_per_group=st.integers(2, 16),
batch_size=st.integers(0, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
share_col_buffer=st.booleans(),
preserve_activation_sparsity=st.booleans(),
preserve_weight_sparsity=st.booleans(),
**hu.gcs_cpu_only
)
@settings(deadline=None)
def test_groupwise_dnnlowp_conv_acc16_int(
self,
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
share_col_buffer,
preserve_activation_sparsity,
preserve_weight_sparsity,
gc,
dc,
):
assume(group == 1 or dilation == 1)
assume(size >= dilation * (kernel - 1) + 1)
input_channels = input_channels_per_group * group
output_channels = output_channels_per_group * group
# X and W have scale 1, so exactly represented after quantization
# This was made sure by having at least one 0 and one 255 for unsigned
# 8-bit tensors, and at least one -128 and one 127 for signed 8-bit
# tensors.
        # Since fbgemm_acc16 accumulates into 16 bits, this test uses small
        # numbers to avoid overflow, except for the 0, 255, -128, and 127 values.
        # We also make sure 255, -128, and 127 are never multiplied together by
        # putting them in different input channels and zeroing the corresponding
        # input channel in the other matrix.
# For example, we put 255 in input channel 1 in X, so we make the
# corresponding input channel in W all zeros.
X_min = 0 if preserve_activation_sparsity else -77
X_max = X_min + 255
X = np.random.rand(batch_size, size, size, input_channels) * 4 + X_min
X = np.round(X).astype(np.float32)
X[..., 0] = X_min
if batch_size != 0:
X[0, 0, 0, 1] = X_max
if preserve_weight_sparsity:
W_min = -128
W_max = 100
else:
W_min = -100
W_max = W_min + 255
W = (
np.random.rand(output_channels, kernel, kernel, input_channels_per_group)
* 4
- 2
+ W_min
+ 128
)
W = np.round(W).astype(np.float32)
W[..., 1] = W_min + 128 # "zeros"
for g in range(group):
W[g * output_channels_per_group, 0, 0, 0] = W_min
W[g * output_channels_per_group + 1, 0, 0, 0] = W_max
if not preserve_weight_sparsity:
W[
g * output_channels_per_group : (g + 1) * output_channels_per_group,
] += g
if order == "NCHW":
X = utils.NHWC2NCHW(X)
W = utils.NHWC2NCHW(W)
# No input quantization error in bias
b = np.round(np.random.randn(output_channels)).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op_engine_list = [
("Conv", ""),
("Conv", "DNNLOWP_ACC16"),
("Int8Conv", "DNNLOWP_ACC16"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine
do_dequantize = "DNNLOWP" in engine
if do_quantize:
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
preserve_activation_sparsity=preserve_activation_sparsity,
engine="DNNLOWP",
device_option=gc,
)
net.Proto().op.extend([quantize])
conv = core.CreateOperator(
op_type,
["X_q" if do_quantize else "X", "W", "b"],
["Y_q" if do_dequantize else "Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
shared_buffer=(1 if share_col_buffer else 0),
preserve_activation_sparsity=preserve_activation_sparsity,
preserve_weight_sparsity=preserve_weight_sparsity,
engine=engine,
group=group,
quantize_groupwise=1,
device_option=gc,
)
if do_dequantize:
# groupwise quantization only works with static quantization
# so we need to set quantization parameters
dnnlowp_utils.add_quantization_param_args(
conv, outputs[0][0], preserve_activation_sparsity
)
net.Proto().op.extend([conv])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine="DNNLOWP", device_option=gc
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, None, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs, symmetric=preserve_activation_sparsity)
@given(
stride=st.integers(1, 2),
pad=st.integers(0, 2),
kernel=st.integers(1, 5),
dilation=st.integers(1, 2),
size=st.integers(10, 16),
group=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 3, 4, 5, 8, 16, 32]),
output_channels_per_group=st.integers(2, 16),
batch_size=st.integers(0, 3),
order=st.sampled_from(["NHWC"]),
prepack_weight=st.booleans(),
nbits_in_non_outlier=st.sampled_from((0, 1, 6, 8)),
share_col_buffer=st.booleans(),
**hu.gcs_cpu_only
)
def test_groupwise_dnnlowp_conv_acc16_outlier(
self,
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
prepack_weight,
nbits_in_non_outlier,
share_col_buffer,
gc,
dc,
):
assume(group == 1 or dilation == 1)
assume(size >= dilation * (kernel - 1) + 1)
input_channels = input_channels_per_group * group
output_channels = output_channels_per_group * group
X_min = -77
X_max = X_min + 255
X = np.random.rand(batch_size, size, size, input_channels) * 4 + X_min
X = np.round(X).astype(np.float32)
X[..., 0] = X_min
if batch_size != 0:
X[0, 0, 0, 1] = X_max
W_min = -100
W_max = W_min + 255
W = (
np.random.rand(output_channels, kernel, kernel, input_channels_per_group)
* 4
- 2
+ W_min
+ 128
)
W = np.round(W).astype(np.float32)
W[..., 1] = W_min + 128 # "zeros"
for g in range(group):
W[g * output_channels_per_group, 0, 0, 0] = W_min
W[g * output_channels_per_group + 1, 0, 0, 0] = W_max
W[g * output_channels_per_group : (g + 1) * output_channels_per_group,] += g
if order == "NCHW":
X = utils.NHWC2NCHW(X)
W = utils.NHWC2NCHW(W)
b = np.round(np.random.randn(output_channels)).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
outputs = []
op_engine_list = [
("Conv", ""),
("Conv", "DNNLOWP_ACC16"),
("Int8Conv", "DNNLOWP_ACC16"),
]
for op_type, engine in op_engine_list:
init_net = core.Net("test_init_net")
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine
do_dequantize = "DNNLOWP" in engine
do_prepack_weight = "DNNLOWP" in engine and prepack_weight
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine="DNNLOWP", device_option=gc
)
net.Proto().op.extend([quantize])
if do_prepack_weight:
X_min = 0 if X.size == 0 else X.min()
X_max = 0 if X.size == 0 else X.max()
x_q_param = dnnlowp_utils.choose_quantization_params(X_min, X_max)
inputs = ["W"]
if do_dequantize:
inputs += ["b"]
pack = core.CreateOperator(
"Int8ConvPackWeight",
inputs,
["W_packed"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
nbits_in_non_outlier=nbits_in_non_outlier,
engine=engine,
group=group,
quantize_groupwise=1,
in_scale=x_q_param.scale,
)
init_net.Proto().op.extend([pack])
conv = core.CreateOperator(
op_type,
[
"X_q" if do_quantize else "X",
"W_packed" if do_prepack_weight else "W",
"b",
],
["Y_q" if do_dequantize else "Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
nbits_in_non_outlier=nbits_in_non_outlier,
shared_buffer=(1 if share_col_buffer else 0),
engine=engine,
group=group,
quantize_groupwise=1,
device_option=gc,
)
if do_dequantize or do_prepack_weight:
# groupwise quantization only works with static quantization
# so we need to set quantization parameters
dnnlowp_utils.add_quantization_param_args(conv, outputs[0][0])
net.Proto().op.extend([conv])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine="DNNLOWP", device_option=gc
)
net.Proto().op.extend([dequantize])
run_conv_or_fc(
self, init_net, net, X, W, b, op_type, engine, order, gc, outputs
)
check_quantized_results_close(outputs)
|
pytorch-master
|
caffe2/quantization/server/conv_groupwise_dnnlowp_acc16_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPSigmoidOpTest(hu.HypothesisTestCase):
@given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)
def test_dnnlowp_sigmoid(self, size, is_empty, gc, dc):
if is_empty:
size = 0
X = (np.random.rand(size) * 20 - 10).astype(np.float32)
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("Sigmoid", ""),
("Sigmoid", "DNNLOWP"),
("Int8Sigmoid", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
if engine == "DNNLOWP":
quantize = core.CreateOperator(
"Quantize",
["X"],
["X_q"],
engine=engine,
device_option=gc,
followed_by="Sigmoid",
)
net.Proto().op.extend([quantize])
sigmoid = core.CreateOperator(
op_type,
["X_q" if engine == "DNNLOWP" else "X"],
["Y_q" if engine == "DNNLOWP" else "Y"],
engine=engine,
device_option=gc,
)
net.Proto().op.extend([sigmoid])
if engine == "DNNLOWP":
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
for o in outputs:
np.testing.assert_allclose(o.Y, outputs[0].Y, atol=0.01, rtol=0)
|
pytorch-master
|
caffe2/quantization/server/sigmoid_dnnlowp_op_test.py
|
import collections
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, dyndep, workspace
from caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close
from hypothesis import given
dyndep.InitOpsLibrary("//caffe2/caffe2/quantization/server:dnnlowp_ops")
workspace.GlobalInit(["caffe2", "--caffe2_omp_num_threads=11"])
class DNNLowPElementwiseLinearOpTest(hu.HypothesisTestCase):
@given(
N=st.integers(32, 256),
D=st.integers(32, 256),
empty_batch=st.booleans(),
in_quantized=st.booleans(),
out_quantized=st.booleans(),
**hu.gcs_cpu_only
)
def test_dnnlowp_elementwise_linear_int(
self, N, D, empty_batch, in_quantized, out_quantized, gc, dc
):
if empty_batch:
N = 0
# All inputs have scale 1, so exactly represented after quantization
min_ = -100
max_ = min_ + 255
X = np.round(np.random.rand(N, D) * (max_ - min_) + min_)
X = X.astype(np.float32)
if N != 0:
X[0, 0] = min_
X[0, 1] = max_
a = np.round(np.random.rand(D) * 255 - 128).astype(np.float32)
a[0] = -128
a[1] = 127
b = np.round(np.random.rand(D) * 255 - 128).astype(np.float32)
b[0] = -128
b[1] = 127
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
outputs = []
op_engine_list = [
("ElementwiseLinear", ""),
("ElementwiseLinear", "DNNLOWP"),
("Int8ElementwiseLinear", "DNNLOWP"),
]
for op_type, engine in op_engine_list:
net = core.Net("test_net")
do_quantize = "DNNLOWP" in engine and in_quantized
do_dequantize = "DNNLOWP" in engine and out_quantized
if do_quantize:
quantize = core.CreateOperator(
"Quantize", ["X"], ["X_q"], engine=engine, device_option=gc
)
net.Proto().op.extend([quantize])
eltwise_linear = core.CreateOperator(
op_type,
["X_q" if do_quantize else "X", "a", "b"],
["Y_q" if do_dequantize else "Y"],
dequantize_output=not do_dequantize,
engine=engine,
device_option=gc,
)
net.Proto().op.extend([eltwise_linear])
if do_dequantize:
dequantize = core.CreateOperator(
"Dequantize", ["Y_q"], ["Y"], engine=engine, device_option=gc
)
net.Proto().op.extend([dequantize])
self.ws.create_blob("X").feed(X, device_option=gc)
self.ws.create_blob("a").feed(a, device_option=gc)
self.ws.create_blob("b").feed(b, device_option=gc)
self.ws.run(net)
outputs.append(
Output(Y=self.ws.blobs["Y"].fetch(), op_type=op_type, engine=engine)
)
check_quantized_results_close(outputs)
|
pytorch-master
|
caffe2/quantization/server/elementwise_linear_dnnlowp_op_test.py
|
"""Adds docstrings to Storage functions"""
import torch._C
from torch._C import _add_docstr as add_docstr
storage_classes = [
"StorageBase",
]
def add_docstr_all(method, docstr):
for cls_name in storage_classes:
cls = getattr(torch._C, cls_name)
try:
add_docstr(getattr(cls, method), docstr)
except AttributeError:
pass
add_docstr_all(
"from_file",
"""
from_file(filename, shared=False, size=0) -> Storage
If `shared` is `True`, then memory is shared between all processes.
All changes are written to the file. If `shared` is `False`, then the changes on
the storage do not affect the file.
`size` is the number of elements in the storage. If `shared` is `False`,
then the file must contain at least `size * sizeof(Type)` bytes
(`Type` is the type of storage). If `shared` is `True` the file will be
created if needed.
Args:
filename (str): file name to map
shared (bool): whether to share memory
size (int): number of elements in the storage
""",
)
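# Illustrative usage sketch (not part of the original file; assumes the classic
# typed-storage API):
#     s = torch.FloatStorage.from_file("example.bin", shared=True, size=100)
#     s[0] = 1.0  # with shared=True, the write is reflected in the file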
|
pytorch-master
|
torch/_storage_docs.py
|
from typing import List, Optional, Union
import torch
import torch._prims_common as utils
from torch import Tensor
from torch._prims_common import (
check,
corresponding_complex_dtype,
corresponding_real_dtype,
elementwise_dtypes,
ELEMENTWISE_TYPE_PROMOTION_KIND,
)
from torch._prims_common.wrappers import out_wrapper
from torch._refs import _broadcast_shapes
from torch.utils._pytree import tree_map
aten = torch.ops.aten
meta_lib = torch.library.Library("aten", "IMPL", "Meta")
meta_table = {}
def register_meta(op, register_dispatcher=True):
def wrapper(f):
def add_func(op):
meta_table[op] = f
if register_dispatcher:
name = (
op.__name__
if op._overloadname != "default"
else op.overloadpacket.__name__
)
meta_lib.impl(name, f)
tree_map(add_func, op)
return f
return wrapper
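# Illustrative note (added): decorating a function with
# @register_meta(aten.some_op.default) records it in meta_table and, unless
# register_dispatcher=False, also installs it as the Meta-backend kernel for
# that op via meta_lib.impl.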
def toRealValueType(dtype):
from_complex = {
torch.complex32: torch.half,
torch.cfloat: torch.float,
torch.cdouble: torch.double,
}
return from_complex.get(dtype, dtype)
@register_meta(aten._fft_c2c.default)
def meta_fft_c2c(self, dim, normalization, forward):
assert self.dtype.is_complex
return self.new_empty(self.size())
@register_meta(aten._fft_r2c.default)
def meta_fft_r2c(self, dim, normalization, onesided):
assert self.dtype.is_floating_point
output_sizes = list(self.size())
if onesided:
last_dim = dim[-1]
last_dim_halfsize = (output_sizes[last_dim] // 2) + 1
output_sizes[last_dim] = last_dim_halfsize
return self.new_empty(
output_sizes, dtype=utils.corresponding_complex_dtype(self.dtype)
)
@register_meta(aten.randperm.generator_out)
def meta_randperm(n, *, generator=None, out):
assert out.ndim == 1 and out.size(0) == n
return out
@register_meta([aten._fft_c2r.default, aten._fft_c2r.out])
@out_wrapper()
def meta_fft_c2r(self, dim, normalization, lastdim):
assert self.dtype.is_complex
output_sizes = list(self.size())
output_sizes[dim[-1]] = lastdim
return self.new_empty(output_sizes, dtype=toRealValueType(self.dtype))
# Implementations below are taken from https://github.com/albanD/subclass_zoo/blob/main/python_meta_tensor.py
@register_meta(aten.index_select.default)
def meta_index_select(self, dim, index):
result_size = list(self.size())
if self.dim() > 0:
result_size[dim] = index.numel()
return self.new_empty(result_size)
@register_meta(aten.index_select.out)
def meta_index_select_out(self, dim, index, out):
torch._resize_output_(out, self.size(), self.device)
return out.copy_(torch.index_select(self, dim, index))
@register_meta([aten.max.default, aten.min.default])
def meta_max(self):
return self.new_empty(())
@register_meta(aten.angle.default)
def meta_angle(self):
if self.is_complex():
result_dtype = corresponding_real_dtype(self.dtype)
else:
_, result_dtype = elementwise_dtypes(
self, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
)
return self.new_empty(self.size(), dtype=result_dtype)
@register_meta(aten.angle.out)
def meta_angle_out(self, out):
torch._resize_output_(out, self.size(), self.device)
return out.copy_(torch.angle(self))
def squareCheckInputs(self, f_name):
assert (
self.dim() >= 2
), f"{f_name}: The input tensor must have at least 2 dimensions."
assert self.size(-1) == self.size(
-2
), f"{f_name}: A must be batches of square matrices, but they are {self.size(-2)} by {self.size(-1)} matrices"
def checkUplo(uplo: str):
uplo_uppercase = uplo.upper()
assert (
        len(uplo) == 1 and uplo_uppercase in ("U", "L")
), f"Expected UPLO argument to be 'L' or 'U', but got {uplo}"
# @register_meta(aten.linalg_eigh.default)
def meta_linalg_eigh(self, uplo="L"):
squareCheckInputs(self, "linalg_eigh")
checkUplo(uplo)
real_dtype = toRealValueType(self.dtype)
assert self.dim() >= 2
values = self.new_empty(self.shape, dtype=real_dtype)
values.transpose_(-2, -1)
vectors = self.new_empty(self.shape[:-1])
return (values, vectors)
@register_meta(aten.reflection_pad2d.default)
def meta_pad2d(self, padding):
valid_dims = self.size(1) != 0 and self.size(2) != 0
check(
(self.ndim == 3 and valid_dims)
or (self.ndim == 4 and valid_dims and self.size(3) != 0),
lambda: f"3D or 4D (batch mode) tensor expected for input, but got: {self}",
)
if self.ndim == 4:
nbatch, nplane, input_h, input_w = self.shape
else:
nbatch = 1
nplane, input_h, input_w = self.shape
pad_l, pad_r, pad_t, pad_b = padding
output_h = input_h + pad_t + pad_b
output_w = input_w + pad_l + pad_r
if self.ndim == 3:
return self.new_empty((nplane, output_h, output_w))
else:
return self.new_empty((nbatch, nplane, output_h, output_w))
def dot_check(self, other):
check(
self.dim() == 1 and other.dim() == 1,
lambda: f"1D tensors expected, but got {self.dim()}D and {other.dim()}D tensors",
)
@register_meta(aten.dot.default)
def meta_dot(self, tensor):
dot_check(self, tensor)
return self.new_empty(())
def _compute_reduction_shape(self, dims, keepdim):
if keepdim:
return tuple(self.shape[i] if i not in dims else 1 for i in range(self.ndim))
return utils.compute_reduction_output_shape(self.shape, dims)
@register_meta(aten.inverse.default)
def meta_inverse(self):
# Bug: https://github.com/pytorch/pytorch/issues/77498
if self.numel() == 0:
return torch.empty_like(self)
r = self.new_empty(self.shape)
r.transpose_(-2, -1)
return r
@torch.library.impl(meta_lib, "bernoulli.out")
def meta_bernoulli(self, *, generator=None, out):
torch._resize_output_(out, self.size(), self.device)
return out
@register_meta(aten.convolution.default)
def meta_conv(
input_tensor: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
stride: List[int],
padding: List[int],
dilation: List[int],
is_transposed: bool,
output_padding: List[int],
groups: int,
):
def _formula(ln: int, p: int, d: int, k: int, s: int) -> int:
"""
Formula to apply to calculate the length of some dimension of the output
See: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
Args:
ln: length of the dimension
p: padding in that dim
d: dilation in that dim
k: kernel size in that dim
s: stride in that dim
Returns:
The output length
"""
return (ln + 2 * p - d * (k - 1) - 1) // s + 1
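    # Illustrative example (not in the original source): ln=5, p=1, d=1, k=3,
    # s=1 gives (5 + 2*1 - 1*(3 - 1) - 1) // 1 + 1 = 5, i.e. "same"-style
    # padding preserves the spatial size.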
def _formula_transposed(ln: int, p: int, d: int, k: int, s: int, op: int) -> int:
"""
Formula to apply to calculate the length of some dimension of the output
if transposed convolution is used.
See: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html
Args:
ln: length of the dimension
p: padding in that dim
d: dilation in that dim
k: kernel size in that dim
s: stride in that dim
op: output padding in that dim
Returns:
The output length
"""
return (ln - 1) * s - 2 * p + d * (k - 1) + op + 1
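    # Illustrative example (not in the original source): ln=5, p=1, d=1, k=3,
    # s=2, op=1 gives (5 - 1) * 2 - 2*1 + 1*(3 - 1) + 1 + 1 = 10.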
def calc_conv_nd_return_shape(
dims: torch.Size,
kernel_size: torch.Size,
stride: Union[List[int], int],
padding: Union[List[int], int],
dilation: Union[List[int], int],
output_padding: Optional[Union[List[int], int]] = None,
):
ret_shape = []
if isinstance(stride, int):
stride = [stride] * len(dims)
elif len(stride) == 1:
stride = [stride[0]] * len(dims)
if isinstance(padding, int):
padding = [padding] * len(dims)
elif len(padding) == 1:
padding = [padding[0]] * len(dims)
if isinstance(dilation, int):
dilation = [dilation] * len(dims)
elif len(dilation) == 1:
dilation = [dilation[0]] * len(dims)
output_padding_list: Optional[List[int]] = None
if output_padding:
if isinstance(output_padding, int):
output_padding_list = [output_padding] * len(dims)
elif len(output_padding) == 1:
output_padding_list = [output_padding[0]] * len(dims)
else:
output_padding_list = output_padding
for i in range(len(dims)):
# If output_padding is present, we are dealing with a transposed convolution
if output_padding_list:
ret_shape.append(
_formula_transposed(
dims[i],
padding[i],
dilation[i],
kernel_size[i],
stride[i],
output_padding_list[i],
)
)
else:
ret_shape.append(
_formula(
dims[i], padding[i], dilation[i], kernel_size[i], stride[i]
)
)
return ret_shape
def pick_memory_format():
if input_tensor.is_contiguous(memory_format=torch.channels_last):
return torch.channels_last
elif input_tensor.is_contiguous(memory_format=torch.contiguous_format):
return torch.contiguous_format
elif input_tensor.is_contiguous(memory_format=torch.preserve_format):
return torch.preserve_format
kernel_size = weight.shape[2:]
dims = input_tensor.shape[2:]
if is_transposed:
out_channels = groups * weight.shape[1]
shape_out = calc_conv_nd_return_shape(
dims,
kernel_size,
stride,
padding,
dilation,
output_padding,
)
else:
out_channels = weight.shape[0]
if weight.shape[1] != input_tensor.shape[1] / groups:
raise RuntimeError("Invalid channel dimensions")
shape_out = calc_conv_nd_return_shape(
dims, kernel_size, stride, padding, dilation
)
out = input_tensor.new_empty((input_tensor.shape[0], out_channels, *shape_out))
mem_fmt = pick_memory_format()
out = out.to(memory_format=mem_fmt) # type: ignore[call-overload]
return out
@register_meta(aten._adaptive_avg_pool2d.default)
def meta_adaptive_avg_pool2d(self, output_size):
check(
self.ndim == 3 or self.ndim == 4,
lambda: f"Expected 3D or 4D tensor, but got {self.shape}",
)
return self.new_empty(self.shape[:-2] + tuple(output_size))
@register_meta(aten._adaptive_avg_pool3d.default)
def meta_adaptive_avg_pool3d(self, output_size):
check(
self.ndim == 4 or self.ndim == 5,
lambda: f"Expected 4D or 5D tensor, but got {self.shape}",
)
return self.new_empty(self.shape[:-3] + tuple(output_size))
@register_meta(aten.repeat_interleave.Tensor)
def meta_repeat_interleave_Tensor(repeats, output_size=None):
if output_size is None:
raise RuntimeError("cannot repeat_interleave a meta tensor without output_size")
return repeats.new_empty(output_size)
@torch.library.impl(meta_lib, "complex")
@torch.library.impl(meta_lib, "complex.out")
@out_wrapper()
def meta_complex(real, imag):
assert real.dtype.is_floating_point
assert imag.dtype.is_floating_point
out_shape = _broadcast_shapes(real.shape, imag.shape)
return real.new_empty(out_shape, dtype=corresponding_complex_dtype(real.dtype))
@torch.library.impl(meta_lib, "vdot")
def vdot(self, other):
    if not self.is_complex():
return torch.dot(self, other)
if self.is_conj():
if other.is_conj():
return torch.vdot(other.conj(), self.conj())
else:
return torch.dot(self.conj(), other)
elif other.is_conj():
return torch.dot(self, other.conj()).conj()
dot_check(self, other)
return self.new_empty(())
# Leaving this function around because a python implementation
# of indexing shape inference is useful,
# but not registering it to the dispatcher because we already
# get shape inference through structured kernels
@register_meta(aten.index.Tensor, register_dispatcher=False)
def meta_index_Tensor(self, indices):
check(indices, lambda: "at least one index must be provided")
# aten::index is the internal advanced indexing implementation
# checkIndexTensorTypes and expandTensors
result: List[Optional[Tensor]] = []
for i, index in enumerate(indices):
if index is not None:
check(
index.dtype in [torch.long, torch.int8, torch.bool],
lambda: "tensors used as indices must be long, byte or bool tensors",
)
if index.dtype in [torch.int8, torch.bool]:
nonzero = index.nonzero()
k = len(result)
check(
k + index.ndim <= self.ndim,
lambda: f"too many indices for tensor of dimension {self.ndim}",
IndexError,
)
for j in range(index.ndim):
check(
index.shape[j] == self.shape[k + j],
lambda: f"The shape of the mask {index.shape} at index {i} "
f"does not match the shape of the indexed tensor {self.shape} at index {k + j}",
IndexError,
)
result.append(nonzero.select(1, j))
else:
result.append(index)
else:
result.append(index)
indices = result
check(
len(indices) <= self.ndim,
lambda: f"too many indices for tensor of dimension {self.ndim} (got {len(indices)})",
)
# expand_outplace
import torch._refs as refs # avoid import cycle in mypy
indices = list(refs._maybe_broadcast(*indices))
# add missing null tensors
while len(indices) < self.ndim:
indices.append(None)
# hasContiguousSubspace
# true if all non-null tensors are adjacent
# See:
# https://numpy.org/doc/stable/user/basics.indexing.html#combining-advanced-and-basic-indexing
# https://stackoverflow.com/questions/53841497/why-does-numpy-mixed-basic-advanced-indexing-depend-on-slice-adjacency
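    # Example (illustrative, not from the original source): indices such as
    # [None, idx0, idx1, None] have a contiguous subspace, whereas
    # [idx0, None, idx1] do not and will be permuted to the front below.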
state = 0
has_contiguous_subspace = False
for index in indices:
if state == 0:
if index is not None:
state = 1
elif state == 1:
if index is None:
state = 2
else:
if index is not None:
break
else:
has_contiguous_subspace = True
# transposeToFront
# This is the logic that causes the newly inserted dimensions to show up
# at the beginning of the tensor, if they're not contiguous
if not has_contiguous_subspace:
dims = []
transposed_indices = []
for i, index in enumerate(indices):
if index is not None:
dims.append(i)
transposed_indices.append(index)
for i, index in enumerate(indices):
if index is None:
dims.append(i)
transposed_indices.append(index)
self = self.permute(dims)
indices = transposed_indices
# AdvancedIndex::AdvancedIndex
# Now we can assume the indices have contiguous subspace
# This is simplified from AdvancedIndex which goes to more effort
# to put the input and indices in a form so that TensorIterator can
# take them. If we write a ref for this, probably that logic should
# get implemented
before_shape: List[int] = []
after_shape: List[int] = []
replacement_shape: List[int] = []
for dim, index in enumerate(indices):
if index is None:
if replacement_shape:
after_shape.append(self.shape[dim])
else:
before_shape.append(self.shape[dim])
else:
replacement_shape = list(index.shape)
return self.new_empty(before_shape + replacement_shape + after_shape)
@register_meta([aten.addbmm.default, aten.addbmm.out])
@out_wrapper()
def meta_addbmm(self, batch1, batch2, *, beta=1, alpha=1):
dim1 = batch1.size(1)
dim2 = batch2.size(2)
self = self.expand((dim1, dim2))
check(batch1.dim() == 3, lambda: "batch1 must be a 3D tensor")
check(batch2.dim() == 3, lambda: "batch2 must be a 3D tensor")
check(
batch1.size(0) == batch2.size(0),
lambda: f"batch1 and batch2 must have same number of batches, got {batch1.size(0)} and {batch2.size(0)}",
)
check(
batch1.size(2) == batch2.size(1),
lambda: (
f"Incompatible matrix sizes for bmm ({batch1.size(1)}x{batch1.size(2)} "
f"and {batch2.size(1)}x{batch2.size(2)})"
),
)
check(
self.size(0) == dim1 and self.size(1) == dim2,
lambda: "self tensor does not match matmul output shape",
)
return self.new_empty(self.size())
@torch.library.impl(meta_lib, "_cdist_forward")
def meta_cdist_forward(x1, x2, p, compute_mode):
check(
x1.dim() >= 2,
lambda: f"cdist only supports at least 2D tensors, X1 got: {x1.dim()}D",
)
check(
x2.dim() >= 2,
lambda: f"cdist only supports at least 2D tensors, X2 got: {x2.dim()}D",
)
check(
x1.size(-1) == x2.size(-1),
lambda: f"X1 and X2 must have the same number of columns. X1: {x1.size(-1)} X2: {x2.size(-1)}",
)
check(
utils.is_float_dtype(x1.dtype),
lambda: "cdist only supports floating-point dtypes, X1 got: {x1.dtype}",
)
check(
utils.is_float_dtype(x2.dtype),
lambda: "cdist only supports floating-point dtypes, X2 got: {x2.dtype}",
)
check(p >= 0, lambda: "cdist only supports non-negative p values")
check(
compute_mode >= 0 and compute_mode <= 2,
lambda: f"possible modes: 0, 1, 2, but was: {compute_mode}",
)
r1 = x1.size(-2)
r2 = x2.size(-2)
batch_tensor1 = x1.shape[:-2]
batch_tensor2 = x2.shape[:-2]
output_shape = list(torch.broadcast_shapes(batch_tensor1, batch_tensor2))
output_shape.extend([r1, r2])
return x1.new_empty(output_shape)
@torch.library.impl(meta_lib, "_embedding_bag")
def meta_embedding_bag(
weight,
indices,
offsets,
scale_grad_by_freq=False,
mode=0,
sparse=False,
per_sample_weights=None,
include_last_offset=False,
padding_idx=-1,
):
check(
indices.dtype in (torch.long, torch.int),
lambda: f"expected indices to be long or int, got {indices.dtype}",
)
check(
offsets.dtype in (torch.long, torch.int),
lambda: f"expected offsets to be long or int, got {offsets.dtype}",
)
check(
utils.is_float_dtype(weight.dtype),
lambda: f"expected weight to be floating point type, got {weight.dtype}",
)
num_bags = offsets.size(0)
if include_last_offset:
check(
num_bags >= 1, lambda: "include_last_offset: numBags should be at least 1"
)
num_bags -= 1
output = weight.new_empty(num_bags, weight.size(1))
MODE_SUM, MODE_MEAN, MODE_MAX = range(3)
if per_sample_weights is not None:
check(
mode == MODE_SUM,
lambda: "embedding_bag: per_sample_weights only supported with mode='sum'",
)
check(
per_sample_weights.dtype == weight.dtype,
lambda: f"expected weight ({weight.dtype}) and per_sample_weights ({per_sample_weights.dtype}) to have same dtype",
)
check(
per_sample_weights.ndim == 1,
lambda: f"expected per_sample_weights to be 1D tensor, got {per_sample_weights.ndim}D",
)
check(
per_sample_weights.numel() == indices.numel(),
lambda: (
f"expected per_sample_weights.numel() ({per_sample_weights.numel()} "
f"to be the same as indices.numel() ({indices.numel()})"
),
)
def is_fast_path_index_select_scale(src, scale, output, padding_idx):
return (
is_fast_path_index_select(src, output, padding_idx) and scale.stride(0) == 1
)
def is_fast_path_index_select(src, output, padding_idx):
return (
(src.dtype == torch.float or src.dtype == torch.half)
and src.stride(1) == 1
and output.stride(1) == 1
and padding_idx < 0
)
def is_fast_path(src, scale, output, padding_idx):
if scale is not None:
return is_fast_path_index_select_scale(src, scale, output, padding_idx)
else:
return is_fast_path_index_select(src, output, padding_idx)
if offsets.device.type != "cpu":
offset2bag = indices.new_empty(indices.size(0))
bag_size = indices.new_empty(offsets.size())
if mode == MODE_MAX:
max_indices = indices.new_empty(num_bags, weight.size(1))
else:
max_indices = indices.new_empty(0)
else:
fast_path_sum = is_fast_path(weight, per_sample_weights, output, padding_idx)
if mode == MODE_MEAN or mode == MODE_MAX or not fast_path_sum:
offset2bag = offsets.new_empty(indices.size(0))
else:
offset2bag = offsets.new_empty(0)
bag_size = offsets.new_empty(num_bags)
max_indices = offsets.new_empty(bag_size.size())
return output, offset2bag, bag_size, max_indices
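# Hedged shape sketch (illustrative): embedding_bag produces one pooled row per bag,
# so the main output is (num_bags, embedding_dim).
import torch
import torch.nn.functional as F
_weight = torch.randn(10, 4)
_indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
_offsets = torch.tensor([0, 4])  # two bags of four indices each
print(F.embedding_bag(_indices, _weight, _offsets).shape)  # torch.Size([2, 4])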
@register_meta([aten.diag.default, aten.diag.out])
@out_wrapper()
def meta_diag(self, dim=0):
check(self.dim() in (1, 2), lambda: "matrix or a vector expected")
if self.dim() == 1:
sz = self.size(0) + abs(dim)
return self.new_empty((sz, sz))
# case: dim is 2
if dim >= 0:
sz = min(self.size(0), self.size(1) - dim)
else:
sz = min(self.size(0) + dim, self.size(1))
return self.new_empty((sz,))
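# Hedged shape sketch (illustrative): a 1D input of length n yields an
# (n + |diagonal|)-square matrix; a 2D input yields the extracted diagonal.
import torch
print(torch.diag(torch.arange(3.0), 1).shape)   # torch.Size([4, 4])
print(torch.diag(torch.zeros(3, 5), 2).shape)   # torch.Size([3])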
@torch.library.impl(meta_lib, "_embedding_bag_forward_only")
def meta_embedding_bag_forward_only(weight, indices, offsets, *args):
output, offset2bag, bag_size, max_indices = meta_embedding_bag(
weight, indices, offsets, *args
)
if offsets.device.type == "cpu":
bag_size = offsets.new_empty(offsets.size())
return output, offset2bag, bag_size, max_indices
def _get_reduction_dtype(input, dtype, promote_int_to_long=True):
# if specified, dtype takes precedence
if dtype:
return dtype
if input.dtype.is_floating_point or input.dtype.is_complex:
return input.dtype
elif promote_int_to_long:
return torch.long
return input.dtype
@register_meta([aten.nansum.default, aten.nansum.out])
@out_wrapper()
def meta_nansum(input, dims=None, keepdim=False, *, dtype=None):
output_dtype = _get_reduction_dtype(input, dtype, promote_int_to_long=True)
dims = utils.reduction_dims(input.shape, dims)
output_shape = _compute_reduction_shape(input, dims, keepdim)
return input.new_empty(output_shape, dtype=output_dtype)
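# Hedged dtype sketch (illustrative): integral inputs are promoted to int64 unless
# an explicit dtype is passed, mirroring _get_reduction_dtype above.
import torch
print(torch.nansum(torch.tensor([1, 2], dtype=torch.int32)).dtype)  # torch.int64
print(torch.nansum(torch.tensor([1.0, float("nan")])))              # tensor(1.)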
@register_meta(aten.nanmedian.default)
def meta_nanmedian(input):
output_shape = utils.compute_reduction_output_shape(
input.shape, tuple(range(input.dim()))
)
return input.new_empty(output_shape)
@register_meta([aten.nanmedian.dim, aten.nanmedian.dim_values])
@out_wrapper("values", "indices")
def meta_nanmedian_dim(input, dim=-1, keepdim=False):
dim = utils.reduction_dims(input.shape, (dim,))
output_shape = _compute_reduction_shape(input, dim, keepdim)
return (
input.new_empty(output_shape),
input.new_empty(output_shape, dtype=torch.long),
)
@torch.library.impl(meta_lib, "logical_not_")
def meta_logical_not_(self):
return self
# We must also trigger meta registrations from PrimTorch ref
# decompositions
import torch._refs
import torch._refs.nn.functional
import torch._refs.special
|
pytorch-master
|
torch/_meta_registrations.py
|
from ._ops import OpOverload
from typing import Set
import traceback
import torch
__all__ = ['Library', 'impl', 'define']
# Set containing the combination of (namespace, operator, DispatchKey) for which a new kernel has been registered
# The keys in the set are of the form `namespace + "/" + op_name + "/" + dispatch_key`.
# This set is maintained to ensure that two libraries don't try to override the exact same functionality,
# which would result in calls landing in kernels that were never intended to be called.
_impls: Set[str] = set()
# prim is reserved by TorchScript interpreter
_reserved_namespaces = ['prim']
class Library:
"""
A class to create libraries that can be used to register new operators or
override operators in existing libraries from Python.
A user can optionally pass in a dispatch keyname if they only want to register
kernels corresponding to one specific dispatch key.
To create a library to override operators in an existing library (with name ns), set the kind to "IMPL".
To create a new library (with name ns) to register new operators, set the kind to "DEF".
Args:
ns: library name
kind: "DEF", "IMPL" (default: "IMPL")
dispatch_key: PyTorch dispatch key (default: "")
"""
def __init__(self, ns, kind, dispatch_key=""):
if kind != "IMPL" and kind != "DEF":
raise ValueError("Unsupported kind: ", kind)
if ns in _reserved_namespaces and kind == "DEF":
raise ValueError(ns, " is a reserved namespace. Please try creating a library with another name.")
frame = traceback.extract_stack(limit=3)[0]
filename, lineno = frame.filename, frame.lineno
self.m = torch._C._dispatch_library(kind, ns, dispatch_key, filename, lineno)
self.ns = ns
self._op_impls = set()
self.kind = kind
self.dispatch_key = dispatch_key
def __repr__(self):
return "Library(kind={}, ns={}, dispatch_key={})>".format(self.kind, self.ns, self.dispatch_key)
def define(self, schema, alias_analysis=""):
r'''Defines a new operator and its semantics in the ns namespace.
Args:
schema: function schema to define a new operator.
alias_analysis (optional): Indicates if the aliasing properties of the operator arguments can be
inferred from the schema (default behavior) or not ("CONSERVATIVE").
Returns:
name of the operator as inferred from the schema.
Example::
>>> my_lib = Library("foo", "DEF")
>>> my_lib.define("sum(Tensor self) -> Tensor")
'''
# This is added because we also want to disallow PURE_FUNCTION alias analysis which is a valid
# AliasAnalysis type in C++
if alias_analysis not in ["", "FROM_SCHEMA", "CONSERVATIVE"]:
raise RuntimeError("Invalid alias_analysis type {}".format(alias_analysis))
return self.m.define(schema, alias_analysis)
def impl(self, op_name, fn, dispatch_key=''):
r'''Registers the function implementation for an operator defined in the library.
Args:
op_name: operator name (along with the overload) or OpOverload object.
fn: function that's the operator implementation for the input dispatch key.
dispatch_key: dispatch key that the input function should be registered for. By default, it uses
the dispatch key that the library was created with.
Example::
>>> # xdoctest: +SKIP
>>> my_lib = Library("aten", "IMPL")
>>> def div_cpu(self, other):
>>> return self * (1 / other)
>>> my_lib.impl("div.Tensor", "CPU")
'''
if not callable(fn):
raise TypeError("Input function is required to be a callable but found type {}".format(type(fn)))
if dispatch_key == '':
dispatch_key = self.dispatch_key
if isinstance(op_name, str):
name = op_name
elif isinstance(op_name, OpOverload):
name = op_name._schema.name
overload_name = op_name._schema.overload_name
if overload_name != '':
name = name + '.' + overload_name
else:
raise RuntimeError("impl should be passed either a name or an OpOverload object as the first argument")
key = self.ns + "/" + name.split("::")[-1] + "/" + dispatch_key
if key in _impls:
# TODO: in future, add more info about where the existing function is registered (this info is
# today already returned by the C++ warning when impl is called but we error out before that)
raise RuntimeError("This is not allowed since there's already a kernel registered from python overriding {}"
"'s behavior for {} dispatch key and {} namespace.".
format(name.split("::")[-1], dispatch_key, self.ns))
if dispatch_key == "Meta":
dispatcher_op_name = name
if '::' not in dispatcher_op_name:
dispatcher_op_name = f'{self.ns}::{dispatcher_op_name}'
# get a string containing the names of every dispatch key that the operator has a registration for.
dispatch_key_registration = torch._C._dispatch_dump(dispatcher_op_name)
# Internally, we shouldn't be registering meta kernels for any operators that
# have CompositeImplicitAutograd kernels.
# Instead, we should be letting those decompositions run, and writing meta kernels
# only for the base operators.
if 'CompositeImplicitAutograd' in dispatch_key_registration:
raise RuntimeError(
f"We should not register a meta kernel directly to the operator '{name}',"
" because it has a CompositeImplicitAutograd kernel in core."
" Instead we should let the operator decompose, and ensure that we have meta kernels"
" for the base ops that it decomposes into.")
self.m.impl(name, dispatch_key, fn)
_impls.add(key)
self._op_impls.add(key)
def __del__(self):
# _op_impls might not have been initialized if an error was thrown in __init__
_op_impls_ = getattr(self, '_op_impls', None)
if _op_impls_:
for key in self._op_impls:
_impls.remove(key)
del self.m
# decorator to register python functions for library ops
# Note: this decorator API should remain consistent with `Library.impl` API
def impl(lib, name, dispatch_key=""):
def wrap(f):
lib.impl(name, f, dispatch_key)
return f
return wrap
def define(lib, schema, alias_analysis=""):
def wrap(f):
name = lib.define(schema, alias_analysis)
lib.impl(name, f)
return f
return wrap
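# Hedged end-to-end sketch (added for illustration; the namespace "mylib" and the
# operator below are made up): define a new op and register a CPU kernel using the
# Library class and the impl decorator above.
my_lib = Library("mylib", "DEF")
my_lib.define("twice(Tensor self) -> Tensor")
@impl(my_lib, "twice", "CPU")
def _twice_cpu(self):
    # Simple CPU implementation used only for the example.
    return self * 2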
|
pytorch-master
|
torch/library.py
|
"""
The weak_script annotation needs to be here instead of inside torch/jit/ so it
can be used in other places in torch/ (namely torch.nn) without running into
circular dependency problems
"""
import ast
import builtins
import collections
import contextlib
import enum
import inspect
import io
import pickle
import sys
import threading
import typing
import warnings
import weakref
from textwrap import dedent
from typing import ( # noqa: F401
Any,
Callable,
Dict,
Generic,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import torch
# This is needed. `torch._jit_internal` is imported before `torch.distributed.__init__`.
# Explicitly ask to import `torch.distributed.__init__` first.
# Otherwise, "AttributeError: module 'torch' has no attribute 'distributed'" is raised.
import torch.distributed.rpc
import torch.package._mangling as package_mangling
from torch._C import Future as CFuture
from torch._sources import fake_range, get_source_lines_and_file, parse_def
from torch.futures import Future
if sys.version_info[:2] > (3, 7):
from typing import Final
else:
from typing_extensions import Final
LockType: Type
try:
import _thread
LockType = _thread.LockType
except ImportError:
import _dummy_thread
LockType = _dummy_thread.LockType
# Wrapper functions that can call either of 2 functions depending on a boolean
# argument
boolean_dispatched: "weakref.WeakKeyDictionary[Callable, Dict[str, Callable]]" = (
weakref.WeakKeyDictionary()
) # noqa: T484
FAKE_FILENAME_PREFIX = "__torch_jit_dataclass"
class SourceLoader:
def __init__(self):
self.content = {}
def cache(self, fn, source):
self.content[fn] = source
def get_source(self, fn):
return self.content.get(fn)
loader = SourceLoader()
def createResolutionCallbackFromEnv(lookup_base):
"""
Creates a resolution callback that will look up qualified names in an
environment, starting with `lookup_base` for the base of any qualified
names, then proceeding down the lookup chain with the resolved object.
You should not use this directly; it should only be used from the other
createResolutionCallbackFrom* functions.
"""
def lookupInModule(qualified_name, module):
if "." in qualified_name:
parts = qualified_name.split(".")
base = parts[0]
remaining_pieces = ".".join(parts[1:])
module_value = getattr(module, base)
return lookupInModule(remaining_pieces, module_value)
else:
return getattr(module, qualified_name)
def parseNestedExpr(expr, module) -> Tuple[Any, int]:
i = 0
while i < len(expr) and expr[i] not in (",", "[", "]"):
i += 1
# Special case logic for the empty Tuple as a subscript (used
# in the type annotation `Tuple[()]`)
if expr[:i] == "()":
return (), i
base = lookupInModule(expr[:i].strip(), module)
assert base is not None, f"Unresolvable type {expr[:i]}"
if i == len(expr) or expr[i] != "[":
return base, i
assert expr[i] == "["
parts = []
while expr[i] != "]":
part_len = 0
i += 1
part, part_len = parseNestedExpr(expr[i:], module)
parts.append(part)
i += part_len
if len(parts) > 1:
return base[tuple(parts)], i + 1
else:
return base[parts[0]], i + 1
def parseExpr(expr, module):
try:
value, len_parsed = parseNestedExpr(expr, module)
assert len_parsed == len(
expr
), "whole expression was not parsed, falling back to c++ parser"
return value
except Exception:
"""
The python resolver fails in several cases in known unit tests, and is intended
to fall back gracefully to the c++ resolver in general. For example, Python 2 style
annotations, which are frequent in our unit tests, often fail with types (e.g. int) not
resolvable from the calling frame.
"""
return None
return lambda expr: parseExpr(expr, lookup_base)
def createResolutionCallbackFromFrame(frames_up: int = 0):
"""
Creates a function which, given a string variable name,
returns the value of the variable in the scope of the caller of
the function which called createResolutionCallbackFromFrame (by default).
This is used to enable access to in-scope Python variables inside
TorchScript fragments.
frames_up is number of additional frames to go up on the stack.
The default value is 0, which corresponds to the frame of the caller
of createResolutionCallbackFromFrame. For example, if frames_up is set
to 1, then the frame of the caller's caller of createResolutionCallbackFromFrame
will be taken.
For example, the following program prints 2::
def bar():
cb = createResolutionCallbackFromFrame(1)
print(cb("foo"))
def baz():
foo = 2
bar()
baz()
"""
frame = inspect.currentframe()
i = 0
while i < frames_up + 1:
assert frame is not None
frame = frame.f_back
i += 1
assert frame is not None
f_locals = frame.f_locals
f_globals = frame.f_globals
class env(object):
def __getattr__(self, key):
if key in f_locals:
return f_locals[key]
elif key in f_globals:
return f_globals[key]
elif key in dir(builtins):
return getattr(builtins, key)
return createResolutionCallbackFromEnv(env())
def get_closure(fn):
"""
Get a dictionary of closed over variables from a function
"""
captures = {}
captures.update(fn.__globals__)
for index, captured_name in enumerate(fn.__code__.co_freevars):
captures[captured_name] = fn.__closure__[index].cell_contents
return captures
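# Hedged illustration of get_closure (the helper names are made up): free variables
# of a nested function are recovered alongside the enclosing module's globals.
def _outer():
    y = 41
    def _inner():
        return y + 1
    return _inner
print(get_closure(_outer())["y"])  # 41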
# [local resolution in python]
# Depending on where a variable is defined, and where it is used, we may
# or may not be able to recover its value when recursively compiling a
# script function. Remember in the general case, a module or function is
# first defined and then later scripted. This means we do not have a
# chance to capture the active frames when the function is defined. Hence any
# name resolution has to happen later on the created closure. The way
# python captures type annotations restricts what we can recover. The
# following example illustrates the different cases:
#
# class MyGlobalClass:
# ...
# def my_local_scope():
# @torch.jit.script
# class MyClass:
# ...
# @torch.jit.script
# class MyClassUsedAsVar:
# ...
# def eg(x: MyClass, y: MyGlobalClass):
# a_local_capture : Foo
# return MyClassUsedAsVar(x)
#
# MyGlobalClass is defined in the __globals__ dictionary of function
# 'eg', so it is always recoverable. my_local_scope introduces a new local
# variable scope in the function. Classes defined here are only visible as
# local variables. For the case of MyClassUsedAsVar, it is captured
# because it is used as a variable inside the body of the function, and we
# can resolve it using the captures returned from `get_closure`. However,
# the type annotations are not captured by the closure. In Python
# 3.0--3.9, the _value_ of MyClass and MyGlobalClass will be available as
# annotations on `eg`, but starting in Python 4.0, they will be represented as
# strings and no longer present. Furthermore, since the body of `eg` does
# not reference those names, they do not appear in the list of closed over
# variables. In Python 2.x, type annotations are in comments, leading to a
# similar situation where their definitions are not available. We anticipate
# that most users will not run into this issue because their modules and
# functions will be defined at a global scope like MyGlobalClass. In cases
# where they are not, it is possible to work around issues by declaring the
# values global in the function.
# In Python 3.9 declaring class as global will make it invisible to
# `inspect.getsource`, see https://bugs.python.org/issue42666 .
# This could be worked around by manually adding it to the `globals()` dictionary.
def createResolutionCallbackFromClosure(fn):
"""
Create a resolutionCallback by introspecting the function instead of
looking up the stack for the enclosing scope
"""
closure = get_closure(fn)
class closure_lookup(object):
# This is a class since `closure` is a dict and it's easier in
# `env_helper` if everything just works with `getattr` calls
def __getattr__(self, key):
if key in closure:
return closure[key]
elif hasattr(typing, key):
return getattr(typing, key)
elif hasattr(builtins, key):
return getattr(builtins, key)
return None
return createResolutionCallbackFromEnv(closure_lookup())
def can_compile_class(cls) -> bool:
# If any of the functions on a type don't have a code object, this type can't
# be compiled and is probably a builtin / bound from C
if is_ignored_fn(cls):
return False
# Ignore the following list of built-in classes.
ignored_builtin_classes = (torch.nn.Module, tuple, list, Exception)
if issubclass(cls, ignored_builtin_classes):
return False
names = cls.__dict__
fns = [
getattr(cls, name)
for name in names
if inspect.isroutine(getattr(cls, name, None))
]
has_code = [hasattr(fn, "__code__") for fn in fns]
return all(has_code)
def get_callable_argument_names(fn) -> List[str]:
"""
Gets names of all POSITIONAL_OR_KEYWORD arguments for callable `fn`.
Returns an empty list when other types of arguments are present.
This is used by `torch.jit.trace` to assign meaningful argument names to
traced functions and modules.
Args:
fn: A callable.
Returns:
Argument names: List[str]
"""
# inspect.signature may fail, give up in that case.
try:
callable_signature = inspect.signature(fn)
except Exception:
return []
argument_names = []
for name, param in callable_signature.parameters.items():
# All four other types of arguments do not map to individual values
# with a keyword as name.
if not param.kind == param.POSITIONAL_OR_KEYWORD:
return []
argument_names.append(name)
return argument_names
def get_annotation_str(annotation):
"""
Convert an AST node containing a type annotation to the string present in the source
that represents the same annotation.
"""
if isinstance(annotation, ast.Name):
return annotation.id
elif isinstance(annotation, ast.Attribute):
return ".".join([get_annotation_str(annotation.value), annotation.attr])
elif isinstance(annotation, ast.Subscript):
# In Python 3.9+ subscript indices are not wrapped in ast.Index
subscript_slice = annotation.slice if sys.version_info >= (3, 9) else annotation.slice.value # type: ignore[attr-defined]
return f"{get_annotation_str(annotation.value)}[{get_annotation_str(subscript_slice)}]"
elif isinstance(annotation, ast.Tuple):
return ",".join([get_annotation_str(elt) for elt in annotation.elts])
elif isinstance(annotation, ast.Constant) or isinstance(
annotation, ast.NameConstant
):
return f"{annotation.value}"
# If an AST node is not handled here, it's probably handled in ScriptTypeParser.
return None
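# Hedged sketch of get_annotation_str on a parsed function definition; the sample
# source string is made up for the example.
_tree = ast.parse("def f(x: torch.Tensor) -> List[int]: pass")
_fdef = _tree.body[0]
print(get_annotation_str(_fdef.args.args[0].annotation))  # torch.Tensor
print(get_annotation_str(_fdef.returns))                  # List[int]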
def get_type_hint_captures(fn):
"""
Get a dictionary containing type resolution mappings necessary to resolve types
for the literal annotations on 'fn'. These are not considered to be closed-over by fn
and must be obtained separately (e.g. using this function).
Args:
fn: A callable.
Returns:
A Dict[str, Any] containing a mapping from the literal annotations used on
fn to the Python objects they refer to.
"""
# First, try to get the source of the function. We'll need to parse it to find the actual string names
# that were used to annotate the types, since inspect.signature() will only return the class object that
# the annotation refers to, not the string name. If we can't get the source, simply return an empty dict.
# This may happen in cases where the function is synthesized dynamically at runtime.
src = loader.get_source(fn)
if src is None:
src = inspect.getsource(fn)
# Gather a dictionary of parameter name -> type, skipping any parameters whose annotated
# types are strings. These are only understood by TorchScript in the context of a type annotation
# that refers to a class in its own definition, but trying to include a mapping for this in the result
# function would cause infinite recursion because the class is currently being compiled.
# In addition, there is logic in ScriptTypeParser to handle this.
signature = inspect.signature(fn)
name_to_type = {
name: parameter.annotation
for name, parameter in signature.parameters.items()
if parameter.annotation is not inspect.Parameter.empty
and not isinstance(parameter.annotation, str)
}
# Then, get the literal type annotations from the function declaration
# by source inspection. This accounts for the case in which aliases are used
# to annotate the arguments (e.g device_t = torch.device, and then d: device_t).
# frontend.py cannot be used here because it includes _jit_internal, so use ast instead.
a = ast.parse(dedent(src))
if len(a.body) != 1 or not isinstance(a.body[0], ast.FunctionDef):
raise RuntimeError(f"Expected {fn} to be a function")
f = a.body[0]
# Prepare a dictionary of source annotation -> type, which will be the final result of this function,
# by using the parsed AST (f) to reconstruct source annotations as strings for each parameter and mapping
# them to the type object corresponding to the annotation via name_to_type using the parameter name.
annotation_to_type = {}
for arg in f.args.args:
# Get the source type annotation string for this argument if possible.
arg_annotation_str = (
get_annotation_str(arg.annotation) if arg.annotation else None
)
# If the argument has no annotation or get_annotation_str cannot convert it to a string,
# arg_annotation_str will be None. Skip this arg; ScriptTypeParser will probably handle
# this in the latter case.
if arg_annotation_str is None:
continue
# Insert {arg_annotation_str: type} into annotation_to_type if possible. One reason arg_name may not
# be present in name_to_type is that the annotation itself is a string and not a type object
# (common for self-referential annotations in classes). Once again, let ScriptTypeParser handle this.
arg_name = arg.arg
if arg_name in name_to_type:
annotation_to_type[arg_annotation_str] = name_to_type[arg_name]
# If there is a valid return annotation, include it in annotation_to_type. As with argument annotations,
# the literal annotation has to be convertible to a string by get_annotation_str, and the actual type
# of the annotation cannot be a string.
literal_return_annotation = get_annotation_str(f.returns)
valid_literal_annotation = literal_return_annotation is not None
return_annotation = signature.return_annotation
valid_return_annotation_type = (
return_annotation is not inspect.Parameter.empty
and not isinstance(return_annotation, str)
)
if valid_literal_annotation and valid_return_annotation_type:
annotation_to_type[literal_return_annotation] = return_annotation
return annotation_to_type
def createResolutionCallbackForClassMethods(cls):
"""
This looks at all the methods defined in a class and pulls their closed-over
variables into a dictionary and uses that to resolve variables.
"""
# cls is a type here, so `ismethod` is false since the methods on the type
# aren't bound to anything, so Python treats them as regular functions
fns = [
getattr(cls, name)
for name in cls.__dict__
if inspect.isroutine(getattr(cls, name))
]
captures = {}
for fn in fns:
captures.update(get_closure(fn))
captures.update(get_type_hint_captures(fn))
def lookup_in_class(key):
if key in captures:
return captures[key]
else:
return getattr(builtins, key, None)
return lookup_in_class
def boolean_dispatch(
arg_name, arg_index, default, if_true, if_false, module_name, func_name
):
"""
Dispatches to either of 2 script functions based on a boolean argument.
In TorchScript, the boolean argument must be constant so that the correct
function to use can be determined at compile time.
"""
def fn(*args, **kwargs):
dispatch_flag = False
if arg_name in kwargs:
dispatch_flag = kwargs[arg_name]
elif arg_index < len(args):
dispatch_flag = args[arg_index]
if dispatch_flag:
return if_true(*args, **kwargs)
else:
return if_false(*args, **kwargs)
if if_true.__doc__ is None and if_false.__doc__ is not None:
doc = if_false.__doc__
if_true.__doc__ = doc
elif if_false.__doc__ is None and if_true.__doc__ is not None:
doc = if_true.__doc__
if_false.__doc__ = doc
elif if_false.__doc__ is None and if_true.__doc__ is None:
# neither function has a docstring
doc = None
else:
raise RuntimeError("only one function can have a docstring")
fn.__doc__ = doc
if module_name is not None:
fn.__module__ = module_name
if func_name is not None:
fn.__name__ = func_name
boolean_dispatched[fn] = {
"if_true": if_true,
"if_false": if_false,
"index": arg_index,
"default": default,
"arg_name": arg_name,
}
return fn
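# Hedged usage sketch for boolean_dispatch (the helper names below are made up):
# the returned wrapper picks one of the two implementations from the boolean flag.
def _scaled(x, scale=True):
    """Doubles the input."""
    return x * 2
def _unscaled(x, scale=True):
    return x
_maybe_scale = boolean_dispatch(
    arg_name="scale", arg_index=1, default=True,
    if_true=_scaled, if_false=_unscaled,
    module_name=__name__, func_name="_maybe_scale",
)
print(_maybe_scale(3, scale=False))  # 3
print(_maybe_scale(3, scale=True))   # 6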
class FunctionModifiers(object):
"""
Used to denote the behavior of a function in TorchScript. See export() and
ignore() for details.
"""
UNUSED = "unused (ignored and replaced with raising of an exception)"
IGNORE = "ignore (leave as a call to Python, cannot be torch.jit.save'd)"
EXPORT = "export (compile this function even if nothing calls it)"
DEFAULT = "default (compile if called from a exported function / forward)"
COPY_TO_SCRIPT_WRAPPER = (
"if this method is not scripted, copy the python method onto the scripted model"
)
def export(fn):
"""
This decorator indicates that a method on an ``nn.Module`` is used as an entry point into a
:class:`ScriptModule` and should be compiled.
``forward`` is implicitly assumed to be an entry point, so it does not need this decorator.
Functions and methods called from ``forward`` are compiled as they are seen
by the compiler, so they do not need this decorator either.
Example (using ``@torch.jit.export`` on a method):
.. testcode::
import torch
import torch.nn as nn
class MyModule(nn.Module):
def implicitly_compiled_method(self, x):
return x + 99
# `forward` is implicitly decorated with `@torch.jit.export`,
# so adding it here would have no effect
def forward(self, x):
return x + 10
@torch.jit.export
def another_forward(self, x):
# When the compiler sees this call, it will compile
# `implicitly_compiled_method`
return self.implicitly_compiled_method(x)
def unused_method(self, x):
return x - 20
# `m` will contain compiled methods:
# `forward`
# `another_forward`
# `implicitly_compiled_method`
# `unused_method` will not be compiled since it was not called from
# any compiled methods and wasn't decorated with `@torch.jit.export`
m = torch.jit.script(MyModule())
"""
fn._torchscript_modifier = FunctionModifiers.EXPORT
return fn
def unused(fn):
"""
This decorator indicates to the compiler that a function or method should
be ignored and replaced with the raising of an exception. This allows you
to leave code in your model that is not yet TorchScript compatible and still
export your model.
Example (using ``@torch.jit.unused`` on a method)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
def __init__(self, use_memory_efficient):
super(MyModule, self).__init__()
self.use_memory_efficient = use_memory_efficient
@torch.jit.unused
def memory_efficient(self, x):
import pdb
pdb.set_trace()
return x + 10
def forward(self, x):
# Use not-yet-scriptable memory efficient mode
if self.use_memory_efficient:
return self.memory_efficient(x)
else:
return x + 10
m = torch.jit.script(MyModule(use_memory_efficient=False))
m.save("m.pt")
m = torch.jit.script(MyModule(use_memory_efficient=True))
# exception raised
m(torch.rand(100))
"""
if isinstance(fn, property):
prop = fn
setattr( # noqa: B010
prop.fget, "_torchscript_modifier", FunctionModifiers.UNUSED
)
if prop.fset:
setattr( # noqa: B010
prop.fset, "_torchscript_modifier", FunctionModifiers.UNUSED
)
return prop
fn._torchscript_modifier = FunctionModifiers.UNUSED
return fn
# No op context manager from python side
class _IgnoreContextManager(contextlib.AbstractContextManager):
def __init__(self, **kwargs):
pass
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
pass
def ignore(drop=False, **kwargs):
"""
This decorator indicates to the compiler that a function or method should
be ignored and left as a Python function. This allows you to leave code in
your model that is not yet TorchScript compatible. If called from TorchScript,
ignored functions will dispatch the call to the Python interpreter. Models with ignored
functions cannot be exported; use :func:`@torch.jit.unused <torch.jit.unused>` instead.
Example (using ``@torch.jit.ignore`` on a method)::
import torch
import torch.nn as nn
class MyModule(nn.Module):
@torch.jit.ignore
def debugger(self, x):
import pdb
pdb.set_trace()
def forward(self, x):
x += 10
# The compiler would normally try to compile `debugger`,
# but since it is `@ignore`d, it will be left as a call
# to Python
self.debugger(x)
return x
m = torch.jit.script(MyModule())
# Error! The call `debugger` cannot be saved since it calls into Python
m.save("m.pt")
Example (using ``@torch.jit.ignore(drop=True)`` on a method):
.. testcode::
import torch
import torch.nn as nn
class MyModule(nn.Module):
@torch.jit.ignore(drop=True)
def training_method(self, x):
import pdb
pdb.set_trace()
def forward(self, x):
if self.training:
self.training_method(x)
return x
m = torch.jit.script(MyModule())
# This is OK since `training_method` is not saved, the call is replaced
# with a `raise`.
m.save("m.pt")
.. testcleanup::
import os
os.remove('m.pt')
"""
if callable(drop):
# used without any args, so drop is actually a function
# @torch.jit.ignore
# def fn(...):
fn = drop
fn._torchscript_modifier = FunctionModifiers.IGNORE
return fn
if not isinstance(drop, bool):
raise RuntimeError(
"Argument to @torch.jit.ignore must be a bool or "
f"a function but got {drop}"
)
# for backwards compat
drop_on_export = kwargs.pop("drop_on_export", None)
if drop_on_export:
warnings.warn(
"ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function "
"call on compilation. Use torch.jit.unused now. {}",
category=FutureWarning,
)
drop = drop_on_export
elif drop:
warnings.warn(
"ignore(True) has been deprecated. TorchScript will now drop the function "
"call on compilation. Use torch.jit.unused now. {}",
category=FutureWarning,
)
def decorator(fn):
if drop:
fn._torchscript_modifier = FunctionModifiers.UNUSED
else:
fn._torchscript_modifier = FunctionModifiers.IGNORE
return fn
return decorator
def _copy_to_script_wrapper(fn):
fn._torchscript_modifier = FunctionModifiers.COPY_TO_SCRIPT_WRAPPER
return fn
def module_has_exports(mod):
for name in dir(mod):
if hasattr(mod, name):
item = getattr(mod, name)
if callable(item):
if get_torchscript_modifier(item) is FunctionModifiers.EXPORT:
return True
return False
# WARNING: should_drop is currently being used by our JIT code coverage plug-in to mark JIT'd code as covered. If you
# rename this function, please update references in tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py to
# allow JIT'd code to still be covered.
def should_drop(fn) -> bool:
attr = get_torchscript_modifier(fn)
if attr is None:
return False
return attr is FunctionModifiers.UNUSED
def is_ignored_fn(fn) -> bool:
mod = get_torchscript_modifier(fn)
return mod is FunctionModifiers.UNUSED or mod is FunctionModifiers.IGNORE
def is_static_fn(cls, fn) -> bool:
return isinstance(inspect.getattr_static(cls, fn, default=None), staticmethod)
def get_static_fn(cls, fn):
return inspect.getattr_static(cls, fn).__func__
def get_torchscript_modifier(fn):
if not callable(fn):
return None
if hasattr(fn, "__func__"):
fn = fn.__func__
return getattr(fn, "_torchscript_modifier", FunctionModifiers.DEFAULT)
def copy_torchscript_modifier(orig, new) -> None:
attr = get_torchscript_modifier(orig)
if attr is None:
return
new._torchscript_modifier = attr
# overloading registration
# overloads get registered in this file, and compiled in torch/jit/__init__.py
# so that they can be imported in nn/functional.py without an import cycle
# qualified_name => list[overload_functions]
_overloaded_fns: Dict[str, List[Callable]] = {} # noqa: T484
_OVERLOAD_EXAMPLE = """
Example usage of overload function:
@torch.jit._overload
def my_function(x: type0) -> type0: # decl 1
pass
@torch.jit._overload
def my_function(x: type1) -> type1: # decl 2
pass
def my_function(x): # implementation
if isinstance(x, type0):
return x
elif isinstance(x, type1):
return x
"""
def get_overload_no_implementation_error_message(kind, obj):
sourcelines, file_lineno, filename = get_source_lines_and_file(obj)
return (
f'Implementation for the {kind} "{_qualified_name(obj)}" is missing. Please make '
f"sure a definition is provided and defined after all overload declarations.\n"
f'File "{filename}", line {file_lineno}:\n'
+ "".join(sourcelines)
+ "\n"
+ _OVERLOAD_EXAMPLE
)
def _check_overload_body(func):
try:
parsed_def = parse_def(func)
except OSError as e:
# Parsing the function definition can raise an OSError if source is unavailable.
# Since this is just an initial check, just raise a warning if this is the case.
warnings.warn(
f"Unable to retrieve source for @torch.jit._overload function: {func}."
)
return
body = parsed_def.ast.body[0].body
def is_pass(x):
return isinstance(x, ast.Pass)
def is_ellipsis(x):
return isinstance(x, ast.Expr) and isinstance(x.value, ast.Ellipsis)
if len(body) != 1 or not (is_pass(body[0]) or is_ellipsis(body[0])):
msg = (
"Only `pass` statement or `...` can be the body of overload declaration:\n"
)
msg += "\n".join(parsed_def.source.split("\n")[:3])
msg += " <- Expecting `pass` or `...` here!\n" + _OVERLOAD_EXAMPLE
raise RuntimeError(msg)
def _overload(func):
_check_overload_body(func)
qual_name = _qualified_name(func)
global _overloaded_fns
fn_overload_list = _overloaded_fns.get(qual_name)
if fn_overload_list is None:
fn_overload_list = []
_overloaded_fns[qual_name] = fn_overload_list
fn_overload_list.append(func)
return func
def _get_fn_overloads(qual_name):
return _overloaded_fns.get(qual_name)
def _clear_fn_overloads(qual_name) -> None:
del _overloaded_fns[qual_name]
def get_class_name_lineno(method) -> Tuple[str, int]:
current_frame = inspect.currentframe()
# one for the get_class_name call, one for _overload_method call
for i in range(2):
assert (
current_frame is not None
) # assert current frame is not an Optional[FrameType]
current_frame = current_frame.f_back
assert current_frame is not None # same here
class_name = current_frame.f_code.co_name
line_no = current_frame.f_code.co_firstlineno
return class_name, line_no
# At the point the decorator is applied to class methods, the method
# has no reference to its owning class. _qualified_name would not include
# the class it is defined in, so any methods with the same name in the same file
# would have the same _qualified_name, even if they were defined in different
# classes. This problem only exists in python 2.
# We get around this problem by looking at the stack frame and identifying
# the class name, and throwing an error whenever overloads are used
# when modules of the same name are in the same file
# qualified_name => class name => list[overload_functions]
_overloaded_methods: Dict[str, Dict[str, List[Callable]]] = {} # noqa: T484
# (qualified_name, class name) => class_fileno
_overloaded_method_class_fileno = {}
def _overload_method(func):
_check_overload_body(func)
qual_name = _qualified_name(func)
global _overloaded_methods
class_name_map = _overloaded_methods.get(qual_name, None)
if class_name_map is None:
class_name_map = {}
_overloaded_methods[qual_name] = class_name_map
class_name, line_no = get_class_name_lineno(func)
method_overloads = class_name_map.get(class_name, None)
if method_overloads is None:
method_overloads = []
class_name_map[class_name] = method_overloads
_overloaded_method_class_fileno[(qual_name, class_name)] = line_no
else:
existing_lineno = _overloaded_method_class_fileno[(qual_name, class_name)]
if existing_lineno != line_no:
raise RuntimeError(
"Cannot currently overload the same method name in two different"
" classes with the same name in the same module"
)
method_overloads.append(func)
return func
def _get_overloaded_methods(method, mod_class):
# TODO: __name__ not set for submodules in recursive script
if not hasattr(method, "__name__"):
return None
qual_name = _qualified_name(method)
class_name_map = _overloaded_methods.get(qual_name, None)
if class_name_map is None:
return None
overloads = class_name_map.get(mod_class.__name__, None)
if overloads is None:
return None
method_line_no = get_source_lines_and_file(method)[1]
mod_class_fileno = get_source_lines_and_file(mod_class)[1]
mod_end_fileno = mod_class_fileno + len(get_source_lines_and_file(mod_class)[0])
if not (method_line_no >= mod_class_fileno and method_line_no <= mod_end_fileno):
raise Exception(
"Overloads are not useable when a module is redeclared within the same file: "
+ str(method)
)
return overloads
def is_tuple(ann) -> bool:
if ann is Tuple:
raise_error_container_parameter_missing("Tuple")
# For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule
if not hasattr(ann, "__module__"):
return False
return ann.__module__ == "typing" and (
getattr(ann, "__origin__", None) is Tuple
or getattr(ann, "__origin__", None) is tuple
)
def is_list(ann) -> bool:
if ann is List:
raise_error_container_parameter_missing("List")
if not hasattr(ann, "__module__"):
return False
return ann.__module__ == "typing" and (
getattr(ann, "__origin__", None) is List
or getattr(ann, "__origin__", None) is list
)
def is_dict(ann) -> bool:
if ann is Dict:
raise_error_container_parameter_missing("Dict")
if not hasattr(ann, "__module__"):
return False
return ann.__module__ == "typing" and (
getattr(ann, "__origin__", None) is Dict
or getattr(ann, "__origin__", None) is dict
)
def is_union(ann):
if ann is Union:
raise_error_container_parameter_missing("Union")
return (
hasattr(ann, "__module__")
and ann.__module__ == "typing"
and (getattr(ann, "__origin__", None) is Union)
)
def is_optional(ann):
if ann is Optional:
raise_error_container_parameter_missing("Optional")
def is_optional_as_optional(ann):
return (
hasattr(ann, "__module__")
and ann.__module__ == "typing"
and (getattr(ann, "__origin__", None) is Optional)
)
def is_union_as_optional(ann):
ann_args = ann.__args__
return len(ann_args) == 2 and (None in ann_args or type(None) in ann_args)
return is_optional_as_optional(ann) or (is_union(ann) and is_union_as_optional(ann))
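# Hedged behavior sketch: Optional[T] and the equivalent two-element Union with None
# are both recognized; other Unions are not.
print(is_optional(Optional[int]))     # True
print(is_optional(Union[int, None]))  # True
print(is_optional(Union[int, str]))   # False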
def is_future(ann) -> bool:
if ann is Future:
raise RuntimeError(
"Attempted to use Future without a "
"contained type. Please add a contained type, e.g. "
"Future[int]"
)
return getattr(ann, "__origin__", None) is Future
if torch.distributed.rpc.is_available():
from torch._C._distributed_rpc import PyRRef
from torch.distributed.rpc import RRef
def is_rref(ann) -> bool:
if ann is RRef:
raise RuntimeError(
"Attempted to use RRef without a "
"contained type. Please add a contained type, e.g. "
"RRef[int]"
)
return getattr(ann, "__origin__", None) is RRef
def is_rref_instance(obj) -> bool:
return isinstance(obj, PyRRef)
else:
def is_rref_instance(obj) -> bool:
# If the RPC module doesn't exist then RRefs don't exist either.
return False
def is_final(ann) -> bool:
return ann.__module__ in {"typing", "typing_extensions"} and (
getattr(ann, "__origin__", None) is Final or isinstance(ann, type(Final))
)
# allows BroadcastingList instance to be subscriptable
class BroadcastingListCls(object):
def __getitem__(self, types):
return
# mypy doesn't support parameters on types, so we have to explicitly type each
# list size
BroadcastingList1 = BroadcastingListCls()
for i in range(2, 7):
globals()[f"BroadcastingList{i}"] = BroadcastingList1
def is_scripting() -> bool:
r"""
Function that returns True when in compilation and False otherwise. This
is useful especially with the @unused decorator to leave code in your
model that is not yet TorchScript compatible.
.. testcode::
import torch
@torch.jit.unused
def unsupported_linear_op(x):
return x
def linear(x):
if torch.jit.is_scripting():
return torch.linear(x)
else:
return unsupported_linear_op(x)
"""
return False
# Retrieves a fully-qualified name (module hierarchy + classname) for a given obj.
def _qualified_name(obj, mangle_name=True) -> str:
# This special case allows us to override the qualified name on a type.
# It's currently used in conjunction with tracing, where we create a
# fake module to filter only supported attributes. However, since this
# new type is defined as a local class, we need a mechanism to override
# its qualname so it appears correctly in the TorchScript system. Thus,
# we set '_jit_override_qualname' with the original traced module's
# qualified name, which is picked up here
if hasattr(obj, "_jit_override_qualname"):
return obj._jit_override_qualname
# short-circuit in cases where the object already has a known qualified name
if isinstance(obj, torch._C.ScriptFunction):
return obj.qualified_name
if getattr(obj, "__name__", None):
name = obj.__name__
# Enum classes do not have `__name__` attr, instead they have `name`.
elif isinstance(obj, enum.Enum):
name = obj.name
else:
raise RuntimeError("Could not get name of python class object")
if name == "<lambda>":
name = "_lambda" # make name a valid identifier
module_name = obj.__module__
# If the module is actually a torchbind module, then we should short circuit
if module_name == "torch._classes":
return obj.qualified_name
# The Python docs are very clear that `__module__` can be None, but I can't
# figure out when it actually would be.
if module_name is None:
raise RuntimeError(
f"Could not get qualified name for class '{name}': "
"__module__ can't be None."
)
# if getattr(sys.modules[module_name], name) is not obj:
# raise RuntimeError(f"Could not get qualified name for class '{name}': "
# f"the attr {name} on module {module_name} is not the the class")
# torch.package and TorchScript have separate mangling schemes to avoid
# name collisions from multiple packages. To avoid them interfering with
# each other, normalize the package mangling here.
if package_mangling.is_mangled(module_name):
module_name = module_name.replace("<", "_")
module_name = module_name.replace(">", "_")
# The PythonExceptionValue C++ class in torch/csrc/jit/python/python_sugared_value.h
# does not need to mangle the python class name.
if mangle_name:
# __main__ is a builtin module, so rewrite it to "__torch__".
if module_name == "__main__":
module_name = "__torch__"
else:
# Everything else gets a "__torch__" prefix to avoid name collisions
# with the names of user values.
module_name = "__torch__." + module_name
if "." in name:
raise RuntimeError(
f"Could not get qualified name for class '{name}': "
f"'{name}' is not a valid identifier"
)
return module_name + "." + name
def _try_get_dispatched_fn(fn):
if not callable(fn):
return None
return boolean_dispatched.get(fn)
def _get_named_tuple_properties(obj):
assert issubclass(obj, tuple) and hasattr(obj, "_fields")
if hasattr(obj, "_field_defaults"):
defaults = [
obj._field_defaults[field]
for field in obj._fields
if field in obj._field_defaults
]
else:
defaults = []
# In Python 3.10+ the recommended way to get annotations is to call the `inspect.get_annotations` function
# Also, annotations from base class are not inherited so they need to be queried explicitly
if sys.version_info[:2] < (3, 10):
obj_annotations = getattr(obj, "__annotations__", {})
else:
obj_annotations = inspect.get_annotations(obj)
if len(obj_annotations) == 0 and hasattr(obj, "__base__"):
obj_annotations = inspect.get_annotations(obj.__base__)
annotations = []
for field in obj._fields:
if field in obj_annotations:
the_type = torch.jit.annotations.ann_to_type(
obj_annotations[field], fake_range()
)
annotations.append(the_type)
else:
annotations.append(torch._C.TensorType.getInferred())
return type(obj).__name__, obj._fields, annotations, defaults
def _create_named_tuple(
t, unqual_name: str, field_names: List[str], defaults: Tuple[Any, ...]
):
# mypy: namedtuple() expects a string literal as the first argument
if sys.version_info < (3, 7, 0):
TupleType = collections.namedtuple(unqual_name, field_names) # type: ignore[no-redef, misc]
TupleType.__new__.__defaults__ = defaults # type: ignore[attr-defined]
else:
TupleType = collections.namedtuple(unqual_name, field_names, defaults=defaults) # type: ignore[call-arg, no-redef, misc]
return TupleType(*t)
@contextlib.contextmanager
def _disable_emit_hooks():
hooks = torch._C._jit_get_emit_hooks()
torch._C._jit_set_emit_hooks(None, None)
yield
torch._C._jit_set_emit_hooks(hooks[0], hooks[1])
def _disable_emit_hooks_decorator(_DecoratorContextManager) -> None: # noqa: F811
def __enter__(self) -> None:
self.hooks = torch._C._jit_get_emit_hooks()
torch._C._jit_set_emit_hooks(None, None)
def __exit__(self, *args) -> None:
torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1])
def _is_exception(obj) -> bool:
if not inspect.isclass(obj):
return False
return issubclass(obj, Exception)
def raise_error_container_parameter_missing(target_type) -> None:
if target_type == "Dict":
raise RuntimeError(
"Attempted to use Dict without "
"contained types. Please add contained type, e.g. "
"Dict[int, int]"
)
raise RuntimeError(
f"Attempted to use {target_type} without a "
"contained type. Please add a contained type, e.g. "
f"{target_type}[int]"
)
def get_origin(target_type):
return getattr(target_type, "__origin__", None)
def get_args(target_type):
return getattr(target_type, "__args__", None)
def check_args_exist(target_type) -> None:
if target_type is List or target_type is list:
raise_error_container_parameter_missing("List")
elif target_type is Tuple or target_type is tuple:
raise_error_container_parameter_missing("Tuple")
elif target_type is Dict or target_type is dict:
raise_error_container_parameter_missing("Dict")
elif target_type is None or target_type is Optional:
raise_error_container_parameter_missing("Optional")
def check_empty_containers(obj) -> None:
if obj == [] or obj == {} or obj == ():
warnings.warn(
"The inner type of a container is lost when "
"calling torch.jit.isinstance in eager mode. For "
"example, List[int] would become list and "
"therefore falsely return True for List[float] or"
" List[str]."
)
# supports List/Dict/Tuple and Optional types
# TODO support future
def container_checker(obj, target_type) -> bool:
origin_type = get_origin(target_type)
check_args_exist(target_type)
if origin_type is list or origin_type is List:
check_empty_containers(obj)
if not isinstance(obj, list):
return False
arg_type = get_args(target_type)[0]
arg_origin = get_origin(arg_type)
for el in obj:
# check if nested container, ex: List[List[str]]
if arg_origin: # processes nested container, ex: List[List[str]]
if not container_checker(el, arg_type):
return False
elif not isinstance(el, arg_type):
return False
return True
elif origin_type is Dict or origin_type is dict:
check_empty_containers(obj)
if not isinstance(obj, dict):
return False
key_type = get_args(target_type)[0]
val_type = get_args(target_type)[1]
for key, val in obj.items():
# check if keys are of right type
if not isinstance(key, key_type):
return False
val_origin = get_origin(val_type)
if val_origin:
if not container_checker(val, val_type):
return False
elif not isinstance(val, val_type):
return False
return True
elif origin_type is Tuple or origin_type is tuple:
check_empty_containers(obj)
if not isinstance(obj, tuple):
return False
arg_types = get_args(target_type)
if len(obj) != len(arg_types):
return False
for el, el_type in zip(obj, arg_types):
el_origin = get_origin(el_type)
if el_origin:
if not container_checker(el, el_type):
return False
elif not isinstance(el, el_type):
return False
return True
elif origin_type is Union: # also handles Optional
if obj is None: # check before recursion because None is always fine
return True
inner_types = get_args(target_type)
for t in inner_types:
t_origin = get_origin(t)
if t_origin:
return container_checker(obj, t)
elif isinstance(obj, t):
return True
return False
def _isinstance(obj, target_type) -> bool:
if isinstance(target_type, collections.abc.Container):
if not isinstance(target_type, tuple):
raise RuntimeError(
"The second argument to "
"`torch.jit.isinstance` must be a type "
"or a tuple of types"
)
for t_type in target_type:
if _isinstance(obj, t_type):
return True
return False
origin_type = get_origin(target_type)
if origin_type:
return container_checker(obj, target_type)
# Handle a bare (non-parameterized) Optional, whose origin is reported as None
# instead of Optional on Python 3.7-3.8
check_args_exist(target_type)
# handle non-containers
return isinstance(obj, target_type)
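# Hedged sketch of the eager torch.jit.isinstance path implemented above; the sample
# values are made up for the example.
print(_isinstance([1, 2, 3], List[int]))         # True
print(_isinstance({"a": 1.0}, Dict[str, int]))   # False
print(_isinstance((1, "x"), Tuple[int, str]))    # True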
class _TensorExtractor(pickle.Pickler):
def __init__(self, *args, tensors: List[torch.Tensor], **kwargs):
super().__init__(*args, **kwargs)
self.tensors = tensors
def persistent_id(self, obj):
if isinstance(obj, torch.Tensor):
self.tensors.append(obj)
return ""
# Since we just want to extract tensors, we don't mind if an object is
# unpicklable if it doesn't contain tensors, as we can just ignore/skip
# it. To play it safe, we only do so for common objects that we're sure
# don't contain tensors. Feel free to add new types here. Note also that
# even if a type isn't listed here this won't block users, since they
# can just add a __getstate__ or __reduce__ method to their class.
if isinstance(obj, LockType):
return ""
# Futures and RRefs don't technically contain a value, they just offer
# the means to access a value.
if isinstance(obj, CFuture) or is_rref_instance(obj):
return ""
if isinstance(obj, torch.cuda.Event):
return ""
if isinstance(obj, threading.Thread):
return ""
return None
def _extract_tensors(obj):
r"""
This function is exclusively called from C++.
See ``torch/csrc/jit/python/python_ivalue.h``.
It extracts the tensors contained in the given object, through pickling.
"""
tensors: List[torch.Tensor] = []
extractor = _TensorExtractor(io.BytesIO(), protocol=-1, tensors=tensors)
extractor.dump(obj)
return tensors
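# Hedged sketch: _extract_tensors walks arbitrary picklable containers and collects
# the tensors it finds; the nested object below is made up.
_obj = {"a": torch.ones(2), "b": [torch.zeros(3), "not a tensor"]}
print(len(_extract_tensors(_obj)))  # 2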
|
pytorch-master
|
torch/_jit_internal.py
|
import torch
import inspect
__all__ = []
# error: Module has no attribute "_return_types"
return_types = torch._C._return_types # type: ignore[attr-defined]
def pytree_register_structseq(cls):
def structseq_flatten(structseq):
return list(structseq), None
def structseq_unflatten(values, context):
return cls(values)
torch.utils._pytree._register_pytree_node(cls, structseq_flatten, structseq_unflatten)
for name in dir(return_types):
if name.startswith('__'):
continue
attr = getattr(return_types, name)
globals()[name] = attr
if not name.startswith('_'):
__all__.append(name)
# Today everything in torch.return_types is a structseq, aka a "namedtuple"-like
# thing defined by the Python C-API. We're going to need to modify this when that
# is no longer the case.
# NB: I don't know how to check that something is a "structseq" so we do a fuzzy
# check for tuple
if inspect.isclass(attr) and issubclass(attr, tuple):
pytree_register_structseq(attr)
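# Hedged usage sketch: structseq return types such as torch.return_types.max now
# round-trip through pytree after the registration above.
_values, _indices = torch.max(torch.tensor([[1.0, 3.0], [2.0, 0.0]]), dim=1)
print(_values, _indices)  # tensor([3., 2.]) tensor([1, 0])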
|
pytorch-master
|
torch/return_types.py
|
# -*- coding: utf-8 -*-
"""Adds docstrings to functions defined in the torch._C"""
import re
import torch._C
from torch._C import _add_docstr as add_docstr
def parse_kwargs(desc):
"""Maps a description of args to a dictionary of {argname: description}.
Input:
(' weight (Tensor): a weight tensor\n' +
' Some optional description')
Output: {
'weight': \
'weight (Tensor): a weight tensor\n Some optional description'
}
"""
# Split on exactly 4 spaces after a newline
regx = re.compile(r"\n\s{4}(?!\s)")
kwargs = [section.strip() for section in regx.split(desc)]
kwargs = [section for section in kwargs if len(section) > 0]
return {desc.split(" ")[0]: desc for desc in kwargs}
def merge_dicts(*dicts):
return {x: d[x] for d in dicts for x in d}
common_args = parse_kwargs(
"""
input (Tensor): the input tensor.
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned tensor. Default: ``torch.preserve_format``.
"""
)
reduceops_common_args = merge_dicts(
common_args,
parse_kwargs(
"""
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
"""
),
)
multi_dim_common = merge_dicts(
reduceops_common_args,
parse_kwargs(
"""
dim (int or tuple of ints): the dimension or dimensions to reduce.
"""
),
{
"keepdim_details": """
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
"""
},
{
"opt_dim": """
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
If ``None``, all dimensions are reduced.
"""
},
)
single_dim_common = merge_dicts(
reduceops_common_args,
parse_kwargs(
"""
dim (int): the dimension to reduce.
"""
),
{
"keepdim_details": """If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`."""
},
)
factory_common_args = merge_dicts(
common_args,
parse_kwargs(
"""
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
"""
),
)
factory_like_common_args = parse_kwargs(
"""
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
"""
)
factory_data_common_args = parse_kwargs(
"""
data (array_like): Initial data for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, infers data type from :attr:`data`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, the returned tensor will be allocated in
pinned memory. Works only for CPU tensors. Default: ``False``.
"""
)
tf32_notes = {
"tf32_note": """This operator supports :ref:`TensorFloat32<tf32_on_ampere>`."""
}
rocm_fp16_notes = {
"rocm_fp16_note": """On certain ROCm devices, when using float16 inputs this module will use \
:ref:`different precision<fp16_on_mi200>` for backward."""
}
reproducibility_notes = {
"forward_reproducibility_note": """This operation may behave nondeterministically when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
"backward_reproducibility_note": """This operation may produce nondeterministic gradients when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
"cudnn_reproducibility_note": """In some circumstances when given tensors on a CUDA device \
and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is \
undesirable, you can try to make the operation deterministic (potentially at \
a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. \
See :doc:`/notes/randomness` for more information.""",
}
add_docstr(
torch.abs,
r"""
abs(input, *, out=None) -> Tensor
Computes the absolute value of each element in :attr:`input`.
.. math::
\text{out}_{i} = |\text{input}_{i}|
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.abs(torch.tensor([-1, -2, 3]))
tensor([ 1, 2, 3])
""".format(
**common_args
),
)
add_docstr(
torch.absolute,
r"""
absolute(input, *, out=None) -> Tensor
Alias for :func:`torch.abs`.
""",
)
add_docstr(
torch.acos,
r"""
acos(input, *, out=None) -> Tensor
Computes the inverse cosine of each element in :attr:`input`.
.. math::
\text{out}_{i} = \cos^{-1}(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.3348, -0.5889, 0.2005, -0.1584])
>>> torch.acos(a)
tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
""".format(
**common_args
),
)
add_docstr(
torch.arccos,
r"""
arccos(input, *, out=None) -> Tensor
Alias for :func:`torch.acos`.
""",
)
add_docstr(
torch.acosh,
r"""
acosh(input, *, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cosh^{-1}(\text{input}_{i})
Note:
The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range
will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`.
"""
+ r"""
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4).uniform_(1, 2)
>>> a
tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ])
>>> torch.acosh(a)
tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ])
""".format(
**common_args
),
)
add_docstr(
torch.arccosh,
r"""
arccosh(input, *, out=None) -> Tensor
Alias for :func:`torch.acosh`.
""",
)
add_docstr(
torch.index_add,
r"""
index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
See :meth:`~Tensor.index_add_` for function description.
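A minimal illustrative sketch of the out-of-place form (the inputs below are
arbitrary and the printed result follows from them)::
>>> x = torch.zeros(3, 2)
>>> index = torch.tensor([0, 2])
>>> source = torch.ones(2, 2)
>>> torch.index_add(x, 0, index, source)
tensor([[1., 1.],
[0., 0.],
[1., 1.]])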
""",
)
add_docstr(
torch.index_copy,
r"""
index_copy(input, dim, index, source, *, out=None) -> Tensor
See :meth:`~Tensor.index_copy_` for function description.
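A minimal illustrative sketch of the out-of-place form (the inputs below are
arbitrary and the printed result follows from them)::
>>> x = torch.zeros(3, 2)
>>> index = torch.tensor([0, 2])
>>> source = torch.full((2, 2), 7.)
>>> torch.index_copy(x, 0, index, source)
tensor([[7., 7.],
[0., 0.],
[7., 7.]])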
""",
)
add_docstr(
torch.index_reduce,
r"""
index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor
See :meth:`~Tensor.index_reduce_` for function description.
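A minimal illustrative sketch using the ``"prod"`` reduction (the inputs below
are arbitrary and the printed result follows from them)::
>>> x = torch.ones(3, 2)
>>> index = torch.tensor([0, 2])
>>> source = torch.tensor([[2., 2.], [3., 3.]])
>>> torch.index_reduce(x, 0, index, source, 'prod')
tensor([[2., 2.],
[1., 1.],
[3., 3.]])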
""",
)
add_docstr(
torch.add,
r"""
add(input, other, *, alpha=1, out=None) -> Tensor
Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.
.. math::
\text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i
"""
+ r"""
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
{input}
other (Tensor or Number): the tensor or number to add to input.
Keyword arguments:
alpha (Number): the multiplier for :attr:`other`.
{out}
Examples::
>>> a = torch.randn(4)
>>> a
tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
>>> torch.add(a, 20)
tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
>>> b = torch.randn(4)
>>> b
tensor([-0.9732, -0.3497, 0.6245, 0.4022])
>>> c = torch.randn(4, 1)
>>> c
tensor([[ 0.3743],
[-1.7724],
[-0.5811],
[-0.8017]])
>>> torch.add(b, c, alpha=10)
tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
[-18.6971, -18.0736, -17.0994, -17.3216],
[ -6.7845, -6.1610, -5.1868, -5.4090],
[ -8.9902, -8.3667, -7.3925, -7.6147]])
""".format(
**common_args
),
)
add_docstr(
torch.addbmm,
r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
.. math::
out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
+ r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.
{tf32_note}
{rocm_fp16_note}
Args:
input (Tensor): matrix to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.addbmm(M, batch1, batch2)
tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
[ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
[ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
""".format(
**common_args, **tf32_notes, **rocm_fp16_notes
),
)
add_docstr(
torch.addcdiv,
r"""
addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.
.. warning::
Integer division with addcdiv is no longer supported, and in a future
release addcdiv will perform a true division of tensor1 and tensor2.
The historic addcdiv behavior can be implemented as
(input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
The future addcdiv behavior is just the latter implementation:
(input + value * tensor1 / tensor2), for all dtypes.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
"""
+ r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the numerator tensor
tensor2 (Tensor): the denominator tensor
Keyword args:
value (Number, optional): multiplier for :math:`\text{{tensor1}} / \text{{tensor2}}`
{out}
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcdiv(t, t1, t2, value=0.1)
tensor([[-0.2312, -3.6496, 0.1312],
[-1.0428, 3.4292, -0.1030],
[-0.5369, -0.9829, 0.0430]])
""".format(
**common_args
),
)
add_docstr(
torch.addcmul,
r"""
addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
and adds it to :attr:`input`.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
"""
+ r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the tensor to be multiplied
tensor2 (Tensor): the tensor to be multiplied
Keyword args:
value (Number, optional): multiplier for :math:`\text{{tensor1}} \times \text{{tensor2}}`
{out}
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcmul(t, t1, t2, value=0.1)
tensor([[-0.8635, -0.6391, 1.6174],
[-0.7617, -0.5879, 1.7388],
[-0.8353, -0.6249, 1.6511]])
""".format(
**common_args
),
)
add_docstr(
torch.addmm,
r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on the matrix-matrix product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
+ r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
{tf32_note}
{rocm_fp16_note}
Args:
input (Tensor): matrix to be added
mat1 (Tensor): the first matrix to be matrix multiplied
mat2 (Tensor): the second matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(2, 3)
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.addmm(M, mat1, mat2)
tensor([[-4.8716, 1.4671, -1.3746],
[ 0.7573, -3.9555, -2.8681]])
""".format(
**common_args, **tf32_notes, **rocm_fp16_notes
),
)
add_docstr(
torch.adjoint,
r"""
adjoint(Tensor) -> Tensor
Returns a view of the tensor conjugated and with the last two dimensions transposed.
``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and
to ``x.transpose(-2, -1)`` for real tensors.
Example::
>>> x = torch.arange(4, dtype=torch.float)
>>> A = torch.complex(x, x).reshape(2, 2)
>>> A
tensor([[0.+0.j, 1.+1.j],
[2.+2.j, 3.+3.j]])
>>> A.adjoint()
tensor([[0.-0.j, 2.-2.j],
[1.-1.j, 3.-3.j]])
>>> (A.adjoint() == A.mH).all()
tensor(True)
""",
)
add_docstr(
torch.sspaddmm,
r"""
sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.
Note: This function is equivalent to :func:`torch.addmm`, except
:attr:`input` and :attr:`mat1` are sparse.
Args:
input (Tensor): a sparse matrix to be added
mat1 (Tensor): a sparse matrix to be matrix multiplied
mat2 (Tensor): a dense matrix to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
{out}
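Example (an illustrative sketch; the sparse inputs below are arbitrary and only
the result's shape is shown)::
>>> input = torch.zeros(3, 2).to_sparse()
>>> mat1 = torch.eye(3).to_sparse()
>>> mat2 = torch.ones(3, 2)
>>> torch.sspaddmm(input, mat1, mat2).size()
torch.Size([3, 2])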
""".format(
**common_args
),
)
add_docstr(
torch.smm,
r"""
smm(input, mat) -> Tensor
Performs a matrix multiplication of the sparse matrix :attr:`input`
with the dense matrix :attr:`mat`.
Args:
input (Tensor): a sparse matrix to be matrix multiplied
mat (Tensor): a dense matrix to be matrix multiplied
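Example (an illustrative sketch; the inputs below are arbitrary and only the
result's shape is shown)::
>>> input = torch.eye(3).to_sparse()
>>> mat = torch.ones(3, 2)
>>> torch.smm(input, mat).size()
torch.Size([3, 2])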
""",
)
add_docstr(
torch.addmv,
r"""
addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`input` is added to the final result.
If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be 1-D tensor of size `n`.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
+ r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
input (Tensor): vector to be added
mat (Tensor): matrix to be matrix multiplied
vec (Tensor): vector to be matrix multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(2)
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.addmv(M, mat, vec)
tensor([-0.3768, -5.5565])
""".format(
**common_args
),
)
add_docstr(
torch.addr,
r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.
Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
+ r"""
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.
Args:
input (Tensor): matrix to be added
vec1 (Tensor): the first vector of the outer product
vec2 (Tensor): the second vector of the outer product
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{{vec1}} \otimes \text{{vec2}}` (:math:`\alpha`)
{out}
Example::
>>> vec1 = torch.arange(1., 4.)
>>> vec2 = torch.arange(1., 3.)
>>> M = torch.zeros(3, 2)
>>> torch.addr(M, vec1, vec2)
tensor([[ 1., 2.],
[ 2., 4.],
[ 3., 6.]])
""".format(
**common_args
),
)
add_docstr(
torch.allclose,
r"""
allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool
This function checks if :attr:`input` and :attr:`other` satisfy the condition:
.. math::
\lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
"""
+ r"""
elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
`numpy.allclose <https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html>`_
Args:
input (Tensor): first tensor to compare
other (Tensor): second tensor to compare
atol (float, optional): absolute tolerance. Default: 1e-08
rtol (float, optional): relative tolerance. Default: 1e-05
equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
Example::
>>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
False
>>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
True
>>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
False
>>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
True
""",
)
add_docstr(
torch.all,
r"""
all(input) -> Tensor
Tests if all elements in :attr:`input` evaluate to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.all(a)
tensor(False, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.all(a)
tensor(False)
.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{out}
Example::
>>> a = torch.rand(4, 2).bool()
>>> a
tensor([[True, True],
[True, False],
[True, True],
[True, True]], dtype=torch.bool)
>>> torch.all(a, dim=1)
tensor([ True, False, True, True], dtype=torch.bool)
>>> torch.all(a, dim=0)
tensor([ True, False], dtype=torch.bool)
""".format(
**single_dim_common
),
)
add_docstr(
torch.any,
r"""
any(input) -> Tensor
Tests if any element in :attr:`input` evaluates to `True`.
.. note:: This function matches the behaviour of NumPy in returning
output of dtype `bool` for all supported dtypes except `uint8`.
For `uint8` the dtype of output is `uint8` itself.
Example::
>>> a = torch.rand(1, 2).bool()
>>> a
tensor([[False, True]], dtype=torch.bool)
>>> torch.any(a)
tensor(True, dtype=torch.bool)
>>> a = torch.arange(0, 3)
>>> a
tensor([0, 1, 2])
>>> torch.any(a)
tensor(True)
.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
:noindex:
For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if any element in the row evaluates to `True` and `False` otherwise.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{out}
Example::
>>> a = torch.randn(4, 2) < 0
>>> a
tensor([[ True, True],
[False, True],
[ True, True],
[False, False]])
>>> torch.any(a, 1)
tensor([ True, True, True, False])
>>> torch.any(a, 0)
tensor([True, True])
""".format(
**single_dim_common
),
)
add_docstr(
torch.angle,
r"""
angle(input, *, out=None) -> Tensor
Computes the element-wise angle (in radians) of the given :attr:`input` tensor.
.. math::
\text{out}_{i} = angle(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
.. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers,
zero for non-negative real numbers, and propagates NaNs. Previously
the function would return zero for all real numbers and not propagate
floating-point NaNs.
Example::
>>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
tensor([ 135., 135., -45.])
""".format(
**common_args
),
)
add_docstr(
torch.as_strided,
r"""
as_strided(input, size, stride, storage_offset=None) -> Tensor
Create a view of an existing `torch.Tensor` :attr:`input` with specified
:attr:`size`, :attr:`stride` and :attr:`storage_offset`.
.. warning::
Prefer using other view functions, like :meth:`torch.Tensor.expand`,
to setting a view's strides manually with `as_strided`, as this
function's behavior depends on the implementation of a tensor's storage.
The constructed view of the storage must only refer to elements within
the storage or a runtime error will be thrown, and if the view is
"overlapped" (with multiple indices referring to the same element in
memory) its behavior is undefined.
Args:
{input}
size (tuple of ints): the shape of the output tensor
stride (tuple of ints): the stride of the output tensor
storage_offset (int, optional): the offset in the underlying storage of the output tensor.
If ``None``, the storage_offset of the output tensor will match the input tensor.
Example::
>>> x = torch.randn(3, 3)
>>> x
tensor([[ 0.9039, 0.6291, 1.0795],
[ 0.1586, 2.1939, -0.4900],
[-0.1909, -0.7503, 1.9355]])
>>> t = torch.as_strided(x, (2, 2), (1, 2))
>>> t
tensor([[0.9039, 1.0795],
[0.6291, 0.1586]])
>>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
>>> t
tensor([[0.6291, 0.1586],
[1.0795, 2.1939]])
""".format(
**common_args
),
)
add_docstr(
torch.as_tensor,
r"""
as_tensor(data, dtype=None, device=None) -> Tensor
Converts data into a tensor, sharing data and preserving autograd
history if possible.
If data is already a tensor with the requested dtype and device
then data itself is returned, but if data is a
tensor with a different dtype or device then it's copied as if using
`data.to(dtype=dtype, device=device)`.
If data is a NumPy array (an ndarray) with the same dtype and device then a
tensor is constructed using :func:`torch.from_numpy`.
.. seealso::
:func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`).
Args:
{data}
{dtype}
device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
then the device of data is used. If None and data is not a tensor then
the result tensor is constructed on the CPU.
Example::
>>> a = numpy.array([1, 2, 3])
>>> t = torch.as_tensor(a)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
>>> a = numpy.array([1, 2, 3])
>>> t = torch.as_tensor(a, device=torch.device('cuda'))
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([1, 2, 3])
""".format(
**factory_data_common_args
),
)
add_docstr(
torch.asin,
r"""
asin(input, *, out=None) -> Tensor
Returns a new tensor with the arcsine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sin^{-1}(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.5962, 1.4985, -0.4396, 1.4525])
>>> torch.asin(a)
tensor([-0.6387, nan, -0.4552, nan])
""".format(
**common_args
),
)
add_docstr(
torch.arcsin,
r"""
arcsin(input, *, out=None) -> Tensor
Alias for :func:`torch.asin`.
""",
)
add_docstr(
torch.asinh,
r"""
asinh(input, *, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sinh^{-1}(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
>>> torch.asinh(a)
tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
""".format(
**common_args
),
)
add_docstr(
torch.arcsinh,
r"""
arcsinh(input, *, out=None) -> Tensor
Alias for :func:`torch.asinh`.
""",
)
add_docstr(
torch.atan,
r"""
atan(input, *, out=None) -> Tensor
Returns a new tensor with the arctangent of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tan^{-1}(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.2341, 0.2539, -0.6256, -0.6448])
>>> torch.atan(a)
tensor([ 0.2299, 0.2487, -0.5591, -0.5727])
""".format(
**common_args
),
)
add_docstr(
torch.arctan,
r"""
arctan(input, *, out=None) -> Tensor
Alias for :func:`torch.atan`.
""",
)
add_docstr(
torch.atan2,
r"""
atan2(input, other, *, out=None) -> Tensor
Element-wise arctangent of :math:`\text{{input}}_{{i}} / \text{{other}}_{{i}}`
with consideration of the quadrant. Returns a new tensor with the signed angles
in radians between vector :math:`(\text{{other}}_{{i}}, \text{{input}}_{{i}})`
and vector :math:`(1, 0)`. (Note that :math:`\text{{other}}_{{i}}`, the second
parameter, is the x-coordinate, while :math:`\text{{input}}_{{i}}`, the first
parameter, is the y-coordinate.)
The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.9041, 0.0196, -0.3108, -2.4423])
>>> torch.atan2(a, torch.randn(4))
tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
""".format(
**common_args
),
)
add_docstr(
torch.arctan2,
r"""
arctan2(input, other, *, out=None) -> Tensor
Alias for :func:`torch.atan2`.
""",
)
add_docstr(
torch.atanh,
r"""
atanh(input, *, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.
Note:
The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
mapped to `+/-INF` respectively.
.. math::
\text{out}_{i} = \tanh^{-1}(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4).uniform_(-1, 1)
>>> a
tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
>>> torch.atanh(a)
tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
""".format(
**common_args
),
)
add_docstr(
torch.arctanh,
r"""
arctanh(input, *, out=None) -> Tensor
Alias for :func:`torch.atanh`.
""",
)
add_docstr(
torch.asarray,
r"""
asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor
Converts :attr:`obj` to a tensor.
:attr:`obj` can be one of:
1. a tensor
2. a NumPy array
3. a DLPack capsule
4. an object that implements Python's buffer protocol
5. a scalar
6. a sequence of scalars
When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will,
by default, not require a gradient, have the same datatype as :attr:`obj`, be on the
same device, and share memory with it. These properties can be controlled with the
:attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments.
If the returned tensor is of a different datatype, on a different device, or a copy is
requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad`
is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is
also a tensor with an autograd history then the returned tensor will have the same history.
When :attr:`obj` is not a tensor, NumPy Array, or DLPack capsule but implements Python's
buffer protocol then the buffer is interpreted as an array of bytes grouped according to
the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is
passed then the default floating point datatype is used, instead.) The returned tensor
will have the specified datatype (or default floating point datatype if none is specified)
and, by default, be on the CPU device and share memory with the buffer.
When :attr:`obj` is none of the above but a scalar or sequence of scalars then the
returned tensor will, by default, infer its datatype from the scalar values, be on the
CPU device, and not share its memory.
.. seealso::
:func:`torch.tensor` creates a tensor that always copies the data from the input object.
:func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays.
:func:`torch.frombuffer` creates a tensor that always shares memory from objects that
implement the buffer protocol.
:func:`torch.from_dlpack` creates a tensor that always shares memory from
DLPack capsules.
Args:
obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's
buffer protocol, scalar, or sequence of scalars.
Keyword args:
dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor.
Default: ``None``, which causes the datatype of the returned tensor to be
inferred from :attr:`obj`.
copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`.
Default: ``None``, which causes the returned tensor to share memory with :attr:`obj`
whenever possible. If ``True`` then the returned tensor does not share its memory.
If ``False`` then the returned tensor shares its memory with :attr:`obj` and an
error is thrown if it cannot.
device (:class:`torch.device`, optional): the device of the returned tensor.
Default: ``None``, which causes the device of :attr:`obj` to be used.
requires_grad (bool, optional): whether the returned tensor requires grad.
Default: ``False``, which causes the returned tensor not to require a gradient.
If ``True``, then the returned tensor will require a gradient, and if :attr:`obj`
is also a tensor with an autograd history then the returned tensor will have
the same history.
Example::
>>> a = torch.tensor([1, 2, 3])
>>> # Shares memory with tensor 'a'
>>> b = torch.asarray(a)
>>> a.data_ptr() == b.data_ptr()
True
>>> # Forces memory copy
>>> c = torch.asarray(a, copy=True)
>>> a.data_ptr() == c.data_ptr()
False
>>> a = torch.tensor([1, 2, 3], requires_grad=True).float()
>>> b = a + 2
>>> b
tensor([1., 2., 3.], grad_fn=<AddBackward0>)
>>> # Shares memory with tensor 'b', with no grad
>>> c = torch.asarray(b)
>>> c
tensor([1., 2., 3.])
>>> # Shares memory with tensor 'b', retaining autograd history
>>> d = torch.asarray(b, requires_grad=True)
>>> d
tensor([1., 2., 3.], grad_fn=<AddBackward0>)
>>> array = numpy.array([1, 2, 3])
>>> # Shares memory with array 'array'
>>> t1 = torch.asarray(array)
>>> array.__array_interface__['data'][0] == t1.data_ptr()
True
>>> # Copies memory due to dtype mismatch
>>> t2 = torch.asarray(array, dtype=torch.float32)
>>> array.__array_interface__['data'][0] == t2.data_ptr()
False
""",
)
add_docstr(
torch.baddbmm,
r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.
.. math::
\text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
it will not be propagated.
"""
+ r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
{tf32_note}
{rocm_fp16_note}
Args:
input (Tensor): the tensor to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
Keyword args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{{batch1}} \mathbin{{@}} \text{{batch2}}` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(10, 3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.baddbmm(M, batch1, batch2).size()
torch.Size([10, 3, 5])
""".format(
**common_args, **tf32_notes, **rocm_fp16_notes
),
)
add_docstr(
torch.bernoulli,
r"""
bernoulli(input, *, generator=None, out=None) -> Tensor
Draws binary random numbers (0 or 1) from a Bernoulli distribution.
The :attr:`input` tensor should be a tensor containing probabilities
to be used for drawing the binary random number.
Hence, all values in :attr:`input` have to be in the range:
:math:`0 \leq \text{input}_i \leq 1`.
The :math:`\text{i}^{th}` element of the output tensor will draw a
value :math:`1` according to the :math:`\text{i}^{th}` probability value given
in :attr:`input`.
.. math::
\text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
"""
+ r"""
The returned :attr:`out` tensor only has values 0 or 1 and is of the same
shape as :attr:`input`.
:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
point ``dtype``.
Args:
input (Tensor): the input tensor of probability values for the Bernoulli distribution
Keyword args:
{generator}
{out}
Example::
>>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1]
>>> a
tensor([[ 0.1737, 0.0950, 0.3609],
[ 0.7148, 0.0289, 0.2676],
[ 0.9456, 0.8937, 0.7202]])
>>> torch.bernoulli(a)
tensor([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> a = torch.ones(3, 3) # probability of drawing "1" is 1
>>> torch.bernoulli(a)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
>>> torch.bernoulli(a)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
""".format(
**common_args
),
)
add_docstr(
torch.bincount,
r"""
bincount(input, weights=None, minlength=0) -> Tensor
Count the frequency of each value in an array of non-negative ints.
The number of bins (size 1) is one larger than the largest value in
:attr:`input` unless :attr:`input` is empty, in which case the result is a
tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
:attr:`minlength` and if :attr:`input` is empty, then the result is tensor of size
:attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
``out[n] += weights[i]`` if :attr:`weights` is specified else
``out[n] += 1``.
Note:
{backward_reproducibility_note}
Arguments:
input (Tensor): 1-d int tensor
weights (Tensor): optional, weight for each value in the input tensor.
Should be of same size as input tensor.
minlength (int): optional, minimum number of bins. Should be non-negative.
Returns:
output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
:attr:`input` is non-empty, else ``Size(0)``
Example::
>>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
>>> weights = torch.linspace(0, 1, steps=5)
>>> input, weights
(tensor([4, 3, 6, 3, 4]),
tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000]))
>>> torch.bincount(input)
tensor([0, 0, 0, 2, 2, 0, 1])
>>> input.bincount(weights)
tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
""".format(
**reproducibility_notes
),
)
add_docstr(
torch.bitwise_not,
r"""
bitwise_not(input, *, out=None) -> Tensor
Computes the bitwise NOT of the given input tensor. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical NOT.
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
tensor([ 0, 1, -4], dtype=torch.int8)
""".format(
**common_args
),
)
add_docstr(
torch.bmm,
r"""
bmm(input, mat2, *, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored in :attr:`input`
and :attr:`mat2`.
:attr:`input` and :attr:`mat2` must be 3-D tensors each containing
the same number of matrices.
If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
:math:`(b \times m \times p)` tensor, :attr:`out` will be a
:math:`(b \times n \times p)` tensor.
.. math::
\text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i
"""
+ r"""
{tf32_note}
{rocm_fp16_note}
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
For broadcasting matrix products, see :func:`torch.matmul`.
Args:
input (Tensor): the first batch of matrices to be multiplied
mat2 (Tensor): the second batch of matrices to be multiplied
Keyword Args:
{out}
Example::
>>> input = torch.randn(10, 3, 4)
>>> mat2 = torch.randn(10, 4, 5)
>>> res = torch.bmm(input, mat2)
>>> res.size()
torch.Size([10, 3, 5])
""".format(
**common_args, **tf32_notes, **rocm_fp16_notes
),
)
add_docstr(
torch.bitwise_and,
r"""
bitwise_and(input, other, *, out=None) -> Tensor
Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical AND.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
{out}
Example::
>>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([1, 0, 3], dtype=torch.int8)
>>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ False, True, False])
""".format(
**common_args
),
)
add_docstr(
torch.bitwise_or,
r"""
bitwise_or(input, other, *, out=None) -> Tensor
Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical OR.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
{out}
Example::
>>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -2, 3], dtype=torch.int8)
>>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, True, False])
""".format(
**common_args
),
)
add_docstr(
torch.bitwise_xor,
r"""
bitwise_xor(input, other, *, out=None) -> Tensor
Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical XOR.
Args:
input: the first input tensor
other: the second input tensor
Keyword args:
{out}
Example::
>>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 0], dtype=torch.int8)
>>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, False, False])
""".format(
**common_args
),
)
add_docstr(
torch.bitwise_left_shift,
r"""
bitwise_left_shift(input, other, *, out=None) -> Tensor
Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.
The operation applied is:
.. math::
\text{{out}}_i = \text{{input}}_i << \text{{other}}_i
Args:
input (Tensor or Scalar): the first input tensor
other (Tensor or Scalar): the second input tensor
Keyword args:
{out}
Example::
>>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 24], dtype=torch.int8)
""".format(
**common_args
),
)
add_docstr(
torch.bitwise_right_shift,
r"""
bitwise_right_shift(input, other, *, out=None) -> Tensor
Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.
The operation applied is:
.. math::
\text{{out}}_i = \text{{input}}_i >> \text{{other}}_i
Args:
input (Tensor or Scalar): the first input tensor
other (Tensor or Scalar): the second input tensor
Keyword args:
{out}
Example::
>>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -7, 3], dtype=torch.int8)
""".format(
**common_args
),
)
add_docstr(
torch.broadcast_to,
r"""
broadcast_to(input, shape) -> Tensor
Broadcasts :attr:`input` to the shape :attr:`shape`.
Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details.
Args:
{input}
shape (list, tuple, or :class:`torch.Size`): the new shape.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> torch.broadcast_to(x, (3, 3))
tensor([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
""".format(
**common_args
),
)
add_docstr(
torch.stack,
r"""
stack(tensors, dim=0, *, out=None) -> Tensor
Concatenates a sequence of tensors along a new dimension.
All tensors need to be of the same size.
Arguments:
tensors (sequence of Tensors): sequence of tensors to concatenate
dim (int): dimension to insert. Has to be between 0 and the number
of dimensions of concatenated tensors (inclusive)
Keyword args:
{out}
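Example (an illustrative sketch; the tensors below are arbitrary and only the
resulting shapes are shown)::
>>> x = torch.randn(2, 3)
>>> torch.stack((x, x, x)).size()         # new dimension inserted at position 0
torch.Size([3, 2, 3])
>>> torch.stack((x, x, x), dim=1).size()  # new dimension inserted at position 1
torch.Size([2, 3, 3])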
""".format(
**common_args
),
)
add_docstr(
torch.hstack,
r"""
hstack(tensors, *, out=None) -> Tensor
Stack tensors in sequence horizontally (column wise).
This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.hstack((a,b))
tensor([1, 2, 3, 4, 5, 6])
>>> a = torch.tensor([[1],[2],[3]])
>>> b = torch.tensor([[4],[5],[6]])
>>> torch.hstack((a,b))
tensor([[1, 4],
[2, 5],
[3, 6]])
""".format(
**common_args
),
)
add_docstr(
torch.vstack,
r"""
vstack(tensors, *, out=None) -> Tensor
Stack tensors in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.vstack((a,b))
tensor([[1, 2, 3],
[4, 5, 6]])
>>> a = torch.tensor([[1],[2],[3]])
>>> b = torch.tensor([[4],[5],[6]])
>>> torch.vstack((a,b))
tensor([[1],
[2],
[3],
[4],
[5],
[6]])
""".format(
**common_args
),
)
add_docstr(
torch.dstack,
r"""
dstack(tensors, *, out=None) -> Tensor
Stack tensors in sequence depthwise (along third axis).
This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.dstack((a,b))
tensor([[[1, 4],
[2, 5],
[3, 6]]])
>>> a = torch.tensor([[1],[2],[3]])
>>> b = torch.tensor([[4],[5],[6]])
>>> torch.dstack((a,b))
tensor([[[1, 4]],
[[2, 5]],
[[3, 6]]])
""".format(
**common_args
),
)
add_docstr(
torch.tensor_split,
r"""
tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
along dimension :attr:`dim` according to the indices or number of sections specified
by :attr:`indices_or_sections`. This function is based on NumPy's
:func:`numpy.array_split`.
Args:
input (Tensor): the tensor to split
indices_or_sections (Tensor, int or list or tuple of ints):
If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)`
sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
have size :code:`int(input.size(dim) / n)`.
If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
If indices_or_sections is a tensor, it must be a zero-dimensional or one-dimensional
long tensor on the CPU.
dim (int, optional): dimension along which to split the tensor. Default: ``0``
Example::
>>> x = torch.arange(8)
>>> torch.tensor_split(x, 3)
(tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
>>> x = torch.arange(7)
>>> torch.tensor_split(x, 3)
(tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
>>> torch.tensor_split(x, (1, 6))
(tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
>>> x = torch.arange(14).reshape(2, 7)
>>> x
tensor([[ 0, 1, 2, 3, 4, 5, 6],
[ 7, 8, 9, 10, 11, 12, 13]])
>>> torch.tensor_split(x, 3, dim=1)
(tensor([[0, 1, 2],
[7, 8, 9]]),
tensor([[ 3, 4],
[10, 11]]),
tensor([[ 5, 6],
[12, 13]]))
>>> torch.tensor_split(x, (1, 6), dim=1)
(tensor([[0],
[7]]),
tensor([[ 1, 2, 3, 4, 5],
[ 8, 9, 10, 11, 12]]),
tensor([[ 6],
[13]]))
""",
)
add_docstr(
torch.chunk,
r"""
chunk(input, chunks, dim=0) -> List of Tensors
Attempts to split a tensor into the specified number of chunks. Each chunk is a view of
the input tensor.
.. note::
This function may return fewer than the specified number of chunks!
.. seealso::
:func:`torch.tensor_split`, a function that always returns exactly the specified number of chunks.
If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`,
all returned chunks will be the same size.
If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`,
all returned chunks will be the same size, except the last one.
If such a division is not possible, this function may return fewer
than the specified number of chunks.
Arguments:
input (Tensor): the tensor to split
chunks (int): number of chunks to return
dim (int): dimension along which to split the tensor
Example::
>>> torch.arange(11).chunk(6)
(tensor([0, 1]),
tensor([2, 3]),
tensor([4, 5]),
tensor([6, 7]),
tensor([8, 9]),
tensor([10]))
>>> torch.arange(12).chunk(6)
(tensor([0, 1]),
tensor([2, 3]),
tensor([4, 5]),
tensor([6, 7]),
tensor([8, 9]),
tensor([10, 11]))
>>> torch.arange(13).chunk(6)
(tensor([0, 1, 2]),
tensor([3, 4, 5]),
tensor([6, 7, 8]),
tensor([ 9, 10, 11]),
tensor([12]))
""",
)
add_docstr(
torch.unsafe_chunk,
r"""
unsafe_chunk(input, chunks, dim=0) -> List of Tensors
Works like :func:`torch.chunk` but without enforcing the autograd restrictions
on inplace modification of the outputs.
.. warning::
This function is safe to use as long as only the input, or only the outputs
are modified inplace after calling this function. It is the user's
responsibility to ensure that is the case. If both the input and one or more
of the outputs are modified inplace, gradients computed by autograd will be
silently incorrect.
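Example (an illustrative sketch; apart from the relaxed autograd checks the
behaviour matches :func:`torch.chunk`)::
>>> torch.unsafe_chunk(torch.arange(10), 2)
(tensor([0, 1, 2, 3, 4]), tensor([5, 6, 7, 8, 9]))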
""",
)
add_docstr(
torch.unsafe_split,
r"""
unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors
Works like :func:`torch.split` but without enforcing the autograd restrictions
on inplace modification of the outputs.
.. warning::
This function is safe to use as long as only the input, or only the outputs
are modified inplace after calling this function. It is the user's
responsibility to ensure that is the case. If both the input and one or more
of the outputs are modified inplace, gradients computed by autograd will be
silently incorrect.
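Example (an illustrative sketch; apart from the relaxed autograd checks the
behaviour matches :func:`torch.split`)::
>>> torch.unsafe_split(torch.arange(6), 2)
(tensor([0, 1]), tensor([2, 3]), tensor([4, 5]))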
""",
)
add_docstr(
torch.hsplit,
r"""
hsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
horizontally according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
If :attr:`input` is one dimensional this is equivalent to calling
``torch.tensor_split(input, indices_or_sections, dim=0)`` (the split dimension is
zero), and if :attr:`input` has two or more dimensions it's equivalent to calling
``torch.tensor_split(input, indices_or_sections, dim=1)`` (the split dimension is 1),
except that if :attr:`indices_or_sections` is an integer it must evenly divide
the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.hsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(4,4)
>>> t
tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> torch.hsplit(t, 2)
(tensor([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
tensor([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]]))
>>> torch.hsplit(t, [3, 6])
(tensor([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
tensor([[ 3.],
[ 7.],
[11.],
[15.]]),
tensor([], size=(4, 0)))
""",
)
add_docstr(
torch.vsplit,
r"""
vsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
vertically according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
This is equivalent to calling ``torch.tensor_split(input, indices_or_sections, dim=0)``
(the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.vsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(4,4)
>>> t
tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> torch.vsplit(t, 2)
(tensor([[0., 1., 2., 3.],
[4., 5., 6., 7.]]),
tensor([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]))
>>> torch.vsplit(t, [3, 6])
(tensor([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
tensor([[12., 13., 14., 15.]]),
tensor([], size=(0, 4)))
""",
)
add_docstr(
torch.dsplit,
r"""
dsplit(input, indices_or_sections) -> List of Tensors
Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
depthwise according to :attr:`indices_or_sections`. Each split is a view of
:attr:`input`.
This is equivalent to calling ``torch.tensor_split(input, indices_or_sections, dim=2)``
(the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error will be thrown.
This function is based on NumPy's :func:`numpy.dsplit`.
Args:
input (Tensor): tensor to split.
indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
Example::
>>> t = torch.arange(16.0).reshape(2, 2, 4)
>>> t
tensor([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> torch.dsplit(t, 2)
(tensor([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]),
tensor([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]]))
>>> torch.dsplit(t, [3, 6])
(tensor([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
tensor([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
tensor([], size=(2, 2, 0)))
""",
)
add_docstr(
torch.can_cast,
r"""
can_cast(from, to) -> bool
Determines if a type conversion is allowed under PyTorch casting rules
described in the type promotion :ref:`documentation <type-promotion-doc>`.
Args:
from (dtype): The original :class:`torch.dtype`.
to (dtype): The target :class:`torch.dtype`.
Example::
>>> torch.can_cast(torch.double, torch.float)
True
>>> torch.can_cast(torch.float, torch.int)
False
""",
)
add_docstr(
torch.corrcoef,
r"""
corrcoef(input) -> Tensor
Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix,
where rows are the variables and columns are the observations.
.. note::
The correlation coefficient matrix R is computed using the covariance matrix C as given by
:math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`
.. note::
Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1.
The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation.
Args:
input (Tensor): A 2D matrix containing multiple variables and observations, or a
Scalar or 1D vector representing a single variable.
Returns:
(Tensor) The correlation coefficient matrix of the variables.
.. seealso::
:func:`torch.cov` covariance matrix.
Example::
>>> x = torch.tensor([[0, 1, 2], [2, 1, 0]])
>>> torch.corrcoef(x)
tensor([[ 1., -1.],
[-1., 1.]])
>>> x = torch.randn(2, 4)
>>> x
tensor([[-0.2678, -0.0908, -0.3766, 0.2780],
[-0.5812, 0.1535, 0.2387, 0.2350]])
>>> torch.corrcoef(x)
tensor([[1.0000, 0.3582],
[0.3582, 1.0000]])
>>> torch.corrcoef(x[0])
tensor(1.)
""",
)
add_docstr(
torch.cov,
r"""
cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor
Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are
the variables and columns are the observations.
A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains
the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
a single variable (Scalar or 1D) then its variance is returned.
The unbiased sample covariance of the variables :math:`x` and :math:`y` is given by:
.. math::
\text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{N~-~1}
where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively.
If :attr:`fweights` and/or :attr:`aweights` are provided, the unbiased weighted covariance
is calculated, which is given by:
.. math::
\text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}{\sum^{N}_{i = 1}w_i~-~1}
where :math:`w` denotes :attr:`fweights` or :attr:`aweights` based on whichever is provided, or
:math:`w = fweights \times aweights` if both are provided, and
:math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable.
Args:
input (Tensor): A 2D matrix containing multiple variables and observations, or a
Scalar or 1D vector representing a single variable.
Keyword Args:
correction (int, optional): difference between the sample size and sample degrees of freedom.
Defaults to Bessel's correction, ``correction = 1``, which returns the unbiased estimate
even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0``
will return the simple average.
fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
These relative weights are typically large for observations considered “important” and smaller for
observations considered less “important”. Its numel must equal the number of columns of :attr:`input`.
Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
Returns:
(Tensor) The covariance matrix of the variables.
.. seealso::
:func:`torch.corrcoef` normalized covariance matrix.
Example::
>>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T
>>> x
tensor([[0, 1, 2],
[2, 1, 0]])
>>> torch.cov(x)
tensor([[ 1., -1.],
[-1., 1.]])
>>> torch.cov(x, correction=0)
tensor([[ 0.6667, -0.6667],
[-0.6667, 0.6667]])
>>> fw = torch.randint(1, 10, (3,))
>>> fw
tensor([1, 6, 9])
>>> aw = torch.rand(3)
>>> aw
tensor([0.4282, 0.0255, 0.4144])
>>> torch.cov(x, fweights=fw, aweights=aw)
tensor([[ 0.4169, -0.4169],
[-0.4169, 0.4169]])
""",
)
add_docstr(
torch.cat,
r"""
cat(tensors, dim=0, *, out=None) -> Tensor
Concatenates the given sequence of tensors in :attr:`tensors` in the given dimension.
All tensors must either have the same shape (except in the concatenating
dimension) or be empty.
:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
and :func:`torch.chunk`.
:func:`torch.cat` can be best understood via examples.
Args:
tensors (sequence of Tensors): any python sequence of tensors of the same type.
Non-empty tensors provided must have the same shape, except in the
cat dimension.
dim (int, optional): the dimension over which the tensors are concatenated
Keyword args:
{out}
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497]])
>>> torch.cat((x, x, x), 0)
tensor([[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497],
[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497],
[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497]])
>>> torch.cat((x, x, x), 1)
tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
-1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
-0.5790, 0.1497]])
""".format(
**common_args
),
)
add_docstr(
torch.concat,
r"""
concat(tensors, dim=0, *, out=None) -> Tensor
Alias of :func:`torch.cat`.
""",
)
add_docstr(
torch.ceil,
r"""
ceil(input, *, out=None) -> Tensor
Returns a new tensor with the ceil of the elements of :attr:`input`,
the smallest integer greater than or equal to each element.
.. math::
\text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.6341, -1.4208, -1.0900, 0.5826])
>>> torch.ceil(a)
tensor([-0., -1., -1., 1.])
""".format(
**common_args
),
)
add_docstr(
torch.real,
r"""
real(input) -> Tensor
Returns a new tensor containing real values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
Args:
{input}
Example::
>>> x = torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.real
tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
""".format(
**common_args
),
)
add_docstr(
torch.imag,
r"""
imag(input) -> Tensor
Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
.. warning::
:func:`imag` is only supported for tensors with complex dtypes.
Args:
{input}
Example::
>>> x = torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.imag
tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
""".format(
**common_args
),
)
add_docstr(
torch.view_as_real,
r"""
view_as_real(input) -> Tensor
Returns a view of :attr:`input` as a real tensor. For an input complex tensor of
:attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new
real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
represents the real and imaginary components of complex numbers.
.. warning::
:func:`view_as_real` is only supported for tensors with ``complex dtypes``.
Args:
{input}
Example::
>>> x = torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
>>> torch.view_as_real(x)
tensor([[ 0.4737, -0.3839],
[-0.2098, -0.6699],
[ 0.3470, -0.9451],
[-0.5174, -1.3136]])
""".format(
**common_args
),
)
add_docstr(
torch.view_as_complex,
r"""
view_as_complex(input) -> Tensor
Returns a view of :attr:`input` as a complex tensor. For an input real
tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a
new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last
dimension of the input tensor is expected to represent the real and imaginary
components of complex numbers.
.. warning::
:func:`view_as_complex` is only supported for tensors with
:class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is
expected to have the last dimension of :attr:`size` 2. In addition, the
tensor must have a `stride` of 1 for its last dimension. The strides of all
other dimensions must be even numbers.
Args:
{input}
Example::
>>> x = torch.randn(4, 2)
>>> x
tensor([[ 1.6116, -0.5772],
[-1.4606, -0.9120],
[ 0.0786, -1.7497],
[-0.6561, -1.6623]])
>>> torch.view_as_complex(x)
tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
""".format(
**common_args
),
)
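# Illustrative sketch: round-trips a real (..., 2) tensor through view_as_complex and
# view_as_real; both are views, so writes through the original are visible through the
# complex view. The helper name _view_as_complex_roundtrip_sketch is ours.
def _view_as_complex_roundtrip_sketch():
    import torch
    x = torch.randn(4, 2)                     # last dimension holds (real, imag) pairs
    z = torch.view_as_complex(x)              # complex view of shape (4,)
    back = torch.view_as_real(z)              # back to shape (4, 2)
    assert back.shape == x.shape
    x[0, 0] = 42.0                            # write through the original ...
    assert z[0].real == 42.0                  # ... and observe it through the view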
add_docstr(
torch.reciprocal,
r"""
reciprocal(input, *, out=None) -> Tensor
Returns a new tensor with the reciprocal of the elements of :attr:`input`
.. math::
\text{out}_{i} = \frac{1}{\text{input}_{i}}
.. note::
Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral
inputs to reciprocal are automatically :ref:`promoted <type-promotion-doc>` to
the default scalar type.
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.4595, -2.1219, -1.4314, 0.7298])
>>> torch.reciprocal(a)
tensor([-2.1763, -0.4713, -0.6986, 1.3702])
""".format(
**common_args
),
)
add_docstr(
torch.cholesky,
r"""
cholesky(input, upper=False, *, out=None) -> Tensor
Computes the Cholesky decomposition of a symmetric positive-definite
matrix :math:`A` or for batches of symmetric positive-definite matrices.
If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
the decomposition has the form:
.. math::
A = U^TU
If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
the decomposition has the form:
.. math::
A = LL^T
If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
tensor will be composed of lower-triangular Cholesky factors of each of the individual
matrices.
.. warning::
:func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky`
and will be removed in a future PyTorch release.
``L = torch.cholesky(A)`` should be replaced with
.. code:: python
L = torch.linalg.cholesky(A)
``U = torch.cholesky(A, upper=True)`` should be replaced with
.. code:: python
U = torch.linalg.cholesky(A).mH
This transform will produce equivalent results for all valid (symmetric positive definite) inputs.
Args:
input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
batch dimensions consisting of symmetric positive-definite matrices.
upper (bool, optional): flag that indicates whether to return an
upper or lower triangular matrix. Default: ``False``
Keyword args:
out (Tensor, optional): the output matrix
Example::
>>> a = torch.randn(3, 3)
>>> a = a @ a.mT + 1e-3 # make symmetric positive-definite
>>> l = torch.cholesky(a)
>>> a
tensor([[ 2.4112, -0.7486, 1.4551],
[-0.7486, 1.3544, 0.1294],
[ 1.4551, 0.1294, 1.6724]])
>>> l
tensor([[ 1.5528, 0.0000, 0.0000],
[-0.4821, 1.0592, 0.0000],
[ 0.9371, 0.5487, 0.7023]])
>>> l @ l.mT
tensor([[ 2.4112, -0.7486, 1.4551],
[-0.7486, 1.3544, 0.1294],
[ 1.4551, 0.1294, 1.6724]])
>>> a = torch.randn(3, 2, 2) # Example for batched input
>>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
>>> l = torch.cholesky(a)
>>> z = l @ l.mT
>>> torch.dist(z, a)
tensor(2.3842e-07)
""",
)
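# Illustrative sketch of the migration described in the torch.cholesky warning above:
# torch.linalg.cholesky returns the lower factor, and .mH yields the upper one.
# The helper name _cholesky_migration_sketch is ours, not a torch API.
def _cholesky_migration_sketch():
    import torch
    a = torch.randn(3, 3)
    a = a @ a.mT + torch.eye(3)               # make symmetric positive-definite
    L = torch.linalg.cholesky(a)              # replaces torch.cholesky(a)
    U = torch.linalg.cholesky(a).mH           # replaces torch.cholesky(a, upper=True)
    assert torch.allclose(L @ L.mT, a, atol=1e-4)
    assert torch.allclose(U.mT @ U, a, atol=1e-4)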
add_docstr(
torch.cholesky_solve,
r"""
cholesky_solve(input, input2, upper=False, *, out=None) -> Tensor
Solves a linear system of equations with a positive semidefinite
matrix to be inverted given its Cholesky factor matrix :math:`u`.
If :attr:`upper` is ``False``, :math:`u` is lower triangular and `c` is
returned such that:
.. math::
c = (u u^T)^{{-1}} b
If :attr:`upper` is ``True`` or not provided, :math:`u` is upper triangular
and `c` is returned such that:
.. math::
c = (u^T u)^{{-1}} b
`torch.cholesky_solve(b, u)` can take in 2D inputs `b, u` or inputs that are
batches of 2D matrices. If the inputs are batches, then batched
outputs `c` are returned.
Supports real-valued and complex-valued inputs.
For the complex-valued inputs the transpose operator above is the conjugate transpose.
Args:
input (Tensor): input matrix :math:`b` of size :math:`(*, m, k)`,
where :math:`*` is zero or more batch dimensions
input2 (Tensor): input matrix :math:`u` of size :math:`(*, m, m)`,
where :math:`*` is zero or more batch dimensions composed of
upper or lower triangular Cholesky factor
upper (bool, optional): whether to consider the Cholesky factor as a
lower or upper triangular matrix. Default: ``False``.
Keyword args:
out (Tensor, optional): the output tensor for `c`
Example::
>>> a = torch.randn(3, 3)
>>> a = torch.mm(a, a.t()) # make symmetric positive definite
>>> u = torch.linalg.cholesky(a)
>>> a
tensor([[ 0.7747, -1.9549, 1.3086],
[-1.9549, 6.7546, -5.4114],
[ 1.3086, -5.4114, 4.8733]])
>>> b = torch.randn(3, 2)
>>> b
tensor([[-0.6355, 0.9891],
[ 0.1974, 1.4706],
[-0.4115, -0.6225]])
>>> torch.cholesky_solve(b, u)
tensor([[ -8.1625, 19.6097],
[ -5.8398, 14.2387],
[ -4.3771, 10.4173]])
>>> torch.mm(a.inverse(), b)
tensor([[ -8.1626, 19.6097],
[ -5.8398, 14.2387],
[ -4.3771, 10.4173]])
""",
)
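# Illustrative sketch: checks the identity c = (u u^T)^{-1} b quoted above for the
# default upper=False case, using a lower-triangular factor from torch.linalg.cholesky.
# The helper name _cholesky_solve_sketch is ours, not a torch API.
def _cholesky_solve_sketch():
    import torch
    a = torch.randn(3, 3)
    a = a @ a.mT + torch.eye(3)               # symmetric positive definite, a = u @ u.mT
    u = torch.linalg.cholesky(a)              # lower-triangular Cholesky factor
    b = torch.randn(3, 2)
    c = torch.cholesky_solve(b, u)            # solves (u u^T) c = b, i.e. a c = b
    assert torch.allclose(a @ c, b, atol=1e-4)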
add_docstr(
torch.cholesky_inverse,
r"""
cholesky_inverse(input, upper=False, *, out=None) -> Tensor
Computes the inverse of a symmetric positive-definite matrix :math:`A` using its
Cholesky factor :math:`u`: returns matrix ``inv``. The inverse is computed using
LAPACK routines ``dpotri`` and ``spotri`` (and the corresponding MAGMA routines).
If :attr:`upper` is ``False``, :math:`u` is lower triangular
such that the returned tensor is
.. math::
inv = (uu^{{T}})^{{-1}}
If :attr:`upper` is ``True`` or not provided, :math:`u` is upper
triangular such that the returned tensor is
.. math::
inv = (u^T u)^{{-1}}
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :math:`A` is a batch of matrices then the output has the same batch dimensions.
Args:
input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)`,
consisting of symmetric positive-definite matrices
where :math:`*` is zero or more batch dimensions.
upper (bool, optional): flag that indicates whether to return an
upper or lower triangular matrix. Default: ``False``.
Keyword args:
out (Tensor, optional): the output tensor for `inv`
Example::
>>> a = torch.randn(3, 3)
>>> a = torch.mm(a, a.t()) + 1e-05 * torch.eye(3) # make symmetric positive definite
>>> u = torch.linalg.cholesky(a)
>>> a
tensor([[ 0.9935, -0.6353, 1.5806],
[ -0.6353, 0.8769, -1.7183],
[ 1.5806, -1.7183, 10.6618]])
>>> torch.cholesky_inverse(u)
tensor([[ 1.9314, 1.2251, -0.0889],
[ 1.2251, 2.4439, 0.2122],
[-0.0889, 0.2122, 0.1412]])
>>> a.inverse()
tensor([[ 1.9314, 1.2251, -0.0889],
[ 1.2251, 2.4439, 0.2122],
[-0.0889, 0.2122, 0.1412]])
>>> a = torch.randn(3, 2, 2) # Example for batched input
>>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
>>> l = torch.linalg.cholesky(a)
>>> z = l @ l.mT
>>> torch.dist(z, a)
tensor(3.5894e-07)
""",
)
add_docstr(
torch.clone,
r"""
clone(input, *, memory_format=torch.preserve_format) -> Tensor
Returns a copy of :attr:`input`.
.. note::
This function is differentiable, so gradients will flow back from the
result of this operation to :attr:`input`. To create a tensor without an
autograd relationship to :attr:`input` see :meth:`~Tensor.detach`.
Args:
{input}
Keyword args:
{memory_format}
""".format(
**common_args
),
)
add_docstr(
torch.clamp,
r"""
clamp(input, min=None, max=None, *, out=None) -> Tensor
Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:
.. math::
y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)
If :attr:`min` is ``None``, there is no lower bound.
Or, if :attr:`max` is ``None`` there is no upper bound.
"""
+ r"""
.. note::
If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) <torch.clamp>`
sets all elements in :attr:`input` to the value of :attr:`max`.
Args:
{input}
min (Number or Tensor, optional): lower-bound of the range to be clamped to
max (Number or Tensor, optional): upper-bound of the range to be clamped to
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-1.7120, 0.1734, -0.0478, -0.0922])
>>> torch.clamp(a, min=-0.5, max=0.5)
tensor([-0.5000, 0.1734, -0.0478, -0.0922])
>>> min = torch.linspace(-1, 1, steps=4)
>>> torch.clamp(a, min=min)
tensor([-1.0000, 0.1734, 0.3333, 1.0000])
""".format(
**common_args
),
)
add_docstr(
torch.clip,
r"""
clip(input, min=None, max=None, *, out=None) -> Tensor
Alias for :func:`torch.clamp`.
""",
)
add_docstr(
torch.column_stack,
r"""
column_stack(tensors, *, out=None) -> Tensor
Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`.
Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t``
in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally.
Args:
tensors (sequence of Tensors): sequence of tensors to concatenate
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 2, 3])
>>> b = torch.tensor([4, 5, 6])
>>> torch.column_stack((a, b))
tensor([[1, 4],
[2, 5],
[3, 6]])
>>> a = torch.arange(5)
>>> b = torch.arange(10).reshape(5, 2)
>>> torch.column_stack((a, b, b))
tensor([[0, 0, 1, 0, 1],
[1, 2, 3, 2, 3],
[2, 4, 5, 4, 5],
[3, 6, 7, 6, 7],
[4, 8, 9, 8, 9]])
""".format(
**common_args
),
)
add_docstr(
torch.complex,
r"""
complex(real, imag, *, out=None) -> Tensor
Constructs a complex tensor with its real part equal to :attr:`real` and its
imaginary part equal to :attr:`imag`.
Args:
real (Tensor): The real part of the complex tensor. Must be float or double.
imag (Tensor): The imaginary part of the complex tensor. Must be same dtype
as :attr:`real`.
Keyword args:
out (Tensor): If the inputs are ``torch.float32``, must be
``torch.complex64``. If the inputs are ``torch.float64``, must be
``torch.complex128``.
Example::
>>> real = torch.tensor([1, 2], dtype=torch.float32)
>>> imag = torch.tensor([3, 4], dtype=torch.float32)
>>> z = torch.complex(real, imag)
>>> z
tensor([(1.+3.j), (2.+4.j)])
>>> z.dtype
torch.complex64
""",
)
add_docstr(
torch.polar,
r"""
polar(abs, angle, *, out=None) -> Tensor
Constructs a complex tensor whose elements are Cartesian coordinates
corresponding to the polar coordinates with absolute value :attr:`abs` and angle
:attr:`angle`.
.. math::
\text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j
.. note::
`torch.polar` is similar to
`std::polar <https://en.cppreference.com/w/cpp/numeric/complex/polar>`_
and does not compute the polar decomposition
of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do.
The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is
infinite.
"""
+ r"""
Args:
abs (Tensor): The absolute value of the complex tensor. Must be float or double.
angle (Tensor): The angle of the complex tensor. Must be same dtype as
:attr:`abs`.
Keyword args:
out (Tensor): If the inputs are ``torch.float32``, must be
``torch.complex64``. If the inputs are ``torch.float64``, must be
``torch.complex128``.
Example::
>>> import numpy as np
>>> abs = torch.tensor([1, 2], dtype=torch.float64)
>>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64)
>>> z = torch.polar(abs, angle)
>>> z
tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128)
""",
)
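# Illustrative sketch: rebuilds torch.polar's output from the cos/sin formula given in
# the docstring above. The helper name _polar_formula_sketch is ours, not a torch API.
def _polar_formula_sketch():
    import torch
    absval = torch.tensor([1.0, 2.0], dtype=torch.float64)
    angle = torch.tensor([0.5, 2.5], dtype=torch.float64)
    z = torch.polar(absval, angle)
    manual = torch.complex(absval * torch.cos(angle), absval * torch.sin(angle))
    assert torch.allclose(z, manual)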
add_docstr(
torch.conj_physical,
r"""
conj_physical(input, *, out=None) -> Tensor
Computes the element-wise conjugate of the given :attr:`input` tensor.
If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`.
.. note::
This performs the conjugate operation regardless of whether the conjugate bit is set.
.. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of
non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical`
when :attr:`input` is of non-complex dtype to be compatible with this change.
.. math::
\text{out}_{i} = conj(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
tensor([-1 - 1j, -2 - 2j, 3 + 3j])
""".format(
**common_args
),
)
add_docstr(
torch.conj,
r"""
conj(input) -> Tensor
Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype,
this function just returns :attr:`input`.
.. note::
:func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized
at any time using :func:`torch.resolve_conj`.
.. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of
non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj`
when :attr:`input` is of non-complex dtype to be compatible with this change.
Args:
{input}
Example::
>>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
>>> x.is_conj()
False
>>> y = torch.conj(x)
>>> y.is_conj()
True
""".format(
**common_args
),
)
add_docstr(
torch.resolve_conj,
r"""
resolve_conj(input) -> Tensor
Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`,
else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`.
Args:
{input}
Example::
>>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
>>> y = x.conj()
>>> y.is_conj()
True
>>> z = y.resolve_conj()
>>> z
tensor([-1 - 1j, -2 - 2j, 3 + 3j])
>>> z.is_conj()
False
""".format(
**common_args
),
)
add_docstr(
torch.resolve_neg,
r"""
resolve_neg(input) -> Tensor
Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`,
else returns :attr:`input`. The output tensor will always have its negative bit set to `False`.
Args:
{input}
Example::
>>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
>>> y = x.conj()
>>> z = y.imag
>>> z.is_neg()
True
>>> out = y.resolve_neg()
>>> out
tensor([-1, -2, -3])
>>> out.is_neg()
False
""".format(
**common_args
),
)
add_docstr(
torch.copysign,
r"""
copysign(input, other, *, out=None) -> Tensor
Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.
.. math::
\text{out}_{i} = \begin{cases}
-|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
|\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
\end{cases}
"""
+ r"""
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
and integer and float inputs.
Args:
input (Tensor): magnitudes.
other (Tensor or Number): contains value(s) whose signbit(s) are
applied to the magnitudes in :attr:`input`.
Keyword args:
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244])
>>> torch.copysign(a, 1)
tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.7079, 0.2778, -1.0249, 0.5719],
[-0.0059, -0.2600, -0.4475, -1.3948],
[ 0.3667, -0.9567, -2.5757, -0.1751],
[ 0.2046, -0.0742, 0.2998, -0.1054]])
>>> b = torch.randn(4)
>>> b
tensor([ 0.2373, 0.3120, 0.3190, -1.1128])
>>> torch.copysign(a, b)
tensor([[ 0.7079, 0.2778, 1.0249, -0.5719],
[ 0.0059, 0.2600, 0.4475, -1.3948],
[ 0.3667, 0.9567, 2.5757, -0.1751],
[ 0.2046, 0.0742, 0.2998, -0.1054]])
>>> a = torch.tensor([1.])
>>> b = torch.tensor([-0.])
>>> torch.copysign(a, b)
tensor([-1.])
.. note::
copysign handles signed zeros. If the other argument has a negative zero (-0),
the corresponding output value will be negative.
""".format(
**common_args
),
)
add_docstr(
torch.cos,
r"""
cos(input, *, out=None) -> Tensor
Returns a new tensor with the cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cos(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
>>> torch.cos(a)
tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
""".format(
**common_args
),
)
add_docstr(
torch.cosh,
r"""
cosh(input, *, out=None) -> Tensor
Returns a new tensor with the hyperbolic cosine of the elements of
:attr:`input`.
.. math::
\text{out}_{i} = \cosh(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
>>> torch.cosh(a)
tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
.. note::
When :attr:`input` is on the CPU, the implementation of torch.cosh may use
the Sleef library, which rounds very large results to infinity or negative
infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
""".format(
**common_args
),
)
add_docstr(
torch.cross,
r"""
cross(input, other, dim=None, *, out=None) -> Tensor
Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
and :attr:`other`.
Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
of vectors, for which it computes the product along the dimension :attr:`dim`.
In this case, the output has the same batch dimensions as the inputs.
If :attr:`dim` is not given, it defaults to the first dimension found
with size 3. Note that this might be unexpected.
.. seealso::
:func:`torch.linalg.cross` which requires specifying dim (defaulting to -1).
.. warning:: This function may change in a future PyTorch release to match
the default behaviour in :func:`torch.linalg.cross`. We recommend using
:func:`torch.linalg.cross`.
Args:
{input}
other (Tensor): the second input tensor
dim (int, optional): the dimension to take the cross-product in.
Keyword args:
{out}
Example::
>>> a = torch.randn(4, 3)
>>> a
tensor([[-0.3956, 1.1455, 1.6895],
[-0.5849, 1.3672, 0.3599],
[-1.1626, 0.7180, -0.0521],
[-0.1339, 0.9902, -2.0225]])
>>> b = torch.randn(4, 3)
>>> b
tensor([[-0.0257, -1.4725, -1.2251],
[-1.1479, -0.7005, -1.9757],
[-1.3904, 0.3726, -1.1836],
[-0.9688, -0.7153, 0.2159]])
>>> torch.cross(a, b, dim=1)
tensor([[ 1.0844, -0.5281, 0.6120],
[-2.4490, -1.5687, 1.9792],
[-0.8304, -1.3037, 0.5650],
[-1.2329, 1.9883, 1.0551]])
>>> torch.cross(a, b)
tensor([[ 1.0844, -0.5281, 0.6120],
[-2.4490, -1.5687, 1.9792],
[-0.8304, -1.3037, 0.5650],
[-1.2329, 1.9883, 1.0551]])
""".format(
**common_args
),
)
add_docstr(
torch.logcumsumexp,
r"""
logcumsumexp(input, dim, *, out=None) -> Tensor
Returns the logarithm of the cumulative summation of the exponentiation of
elements of :attr:`input` in the dimension :attr:`dim`.
For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
.. math::
\text{{logcumsumexp}}(x)_{{ij}} = \log \sum\limits_{{k=0}}^{{j}} \exp(x_{{ik}})
Args:
{input}
dim (int): the dimension to do the operation over
Keyword args:
{out}
Example::
>>> a = torch.randn(10)
>>> torch.logcumsumexp(a, dim=0)
tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
""".format(
**reduceops_common_args
),
)
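# Illustrative sketch: logcumsumexp is the numerically stable form of
# log(cumsum(exp(x))); for moderate inputs the naive composition agrees with it.
# The helper name _logcumsumexp_sketch is ours, not a torch API.
def _logcumsumexp_sketch():
    import torch
    x = torch.randn(10)
    stable = torch.logcumsumexp(x, dim=0)
    naive = torch.log(torch.cumsum(torch.exp(x), dim=0))
    assert torch.allclose(stable, naive, atol=1e-6)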
add_docstr(
torch.cummax,
r"""
cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
location of each maximum value found in the dimension :attr:`dim`.
.. math::
y_i = max(x_1, x_2, x_3, \dots, x_i)
Args:
{input}
dim (int): the dimension to do the operation over
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
1.9946, -0.8209])
>>> torch.cummax(a, dim=0)
torch.return_types.cummax(
values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
1.9946, 1.9946]),
indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
""".format(
**reduceops_common_args
),
)
add_docstr(
torch.cummin,
r"""
cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
location of each minimum value found in the dimension :attr:`dim`.
.. math::
y_i = min(x_1, x_2, x_3, \dots, x_i)
Args:
{input}
dim (int): the dimension to do the operation over
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
0.9165, 1.6684])
>>> torch.cummin(a, dim=0)
torch.return_types.cummin(
values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
-1.3298, -1.3298]),
indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
""".format(
**reduceops_common_args
),
)
add_docstr(
torch.cumprod,
r"""
cumprod(input, dim, *, dtype=None, out=None) -> Tensor
Returns the cumulative product of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.
.. math::
y_i = x_1 \times x_2\times x_3\times \dots \times x_i
Args:
{input}
dim (int): the dimension to do the operation over
Keyword args:
{dtype}
{out}
Example::
>>> a = torch.randn(10)
>>> a
tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
-0.2129, -0.4206, 0.1968])
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
0.0014, -0.0006, -0.0001])
>>> a[5] = 0.0
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
0.0000, -0.0000, -0.0000])
""".format(
**reduceops_common_args
),
)
add_docstr(
torch.cumsum,
r"""
cumsum(input, dim, *, dtype=None, out=None) -> Tensor
Returns the cumulative sum of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.
.. math::
y_i = x_1 + x_2 + x_3 + \dots + x_i
Args:
{input}
dim (int): the dimension to do the operation over
Keyword args:
{dtype}
{out}
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.8286, -0.4890, 0.5155, 0.8443, 0.1865, -0.1752, -2.0595,
0.1850, -1.1571, -0.4243])
>>> torch.cumsum(a, dim=0)
tensor([-0.8286, -1.3175, -0.8020, 0.0423, 0.2289, 0.0537, -2.0058,
-1.8209, -2.9780, -3.4022])
""".format(
**reduceops_common_args
),
)
add_docstr(
torch.count_nonzero,
r"""
count_nonzero(input, dim=None) -> Tensor
Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
If no dim is specified then all non-zeros in the tensor are counted.
Args:
{input}
dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.
Example::
>>> x = torch.zeros(3,3)
>>> x[torch.randn(3,3) > 0.5] = 1
>>> x
tensor([[0., 1., 1.],
[0., 0., 0.],
[0., 0., 1.]])
>>> torch.count_nonzero(x)
tensor(3)
>>> torch.count_nonzero(x, dim=0)
tensor([0, 1, 2])
""".format(
**reduceops_common_args
),
)
add_docstr(
torch.dequantize,
r"""
dequantize(tensor) -> Tensor
Returns an fp32 Tensor by dequantizing a quantized Tensor
Args:
tensor (Tensor): A quantized Tensor
.. function:: dequantize(tensors) -> sequence of Tensors
:noindex:
Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors
Args:
tensors (sequence of Tensors): A list of quantized Tensors
""",
)
add_docstr(
torch.diag,
r"""
diag(input, diagonal=0, *, out=None) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
the diagonal elements of :attr:`input`.
The argument :attr:`diagonal` controls which diagonal to consider:
- If :attr:`diagonal` = 0, it is the main diagonal.
- If :attr:`diagonal` > 0, it is above the main diagonal.
- If :attr:`diagonal` < 0, it is below the main diagonal.
Args:
{input}
diagonal (int, optional): the diagonal to consider
Keyword args:
{out}
.. seealso::
:func:`torch.diagonal` always returns the diagonal of its input.
:func:`torch.diagflat` always constructs a tensor with diagonal elements
specified by the input.
Examples:
Get the square matrix where the input vector is the diagonal::
>>> a = torch.randn(3)
>>> a
tensor([ 0.5950,-0.0872, 2.3298])
>>> torch.diag(a)
tensor([[ 0.5950, 0.0000, 0.0000],
[ 0.0000,-0.0872, 0.0000],
[ 0.0000, 0.0000, 2.3298]])
>>> torch.diag(a, 1)
tensor([[ 0.0000, 0.5950, 0.0000, 0.0000],
[ 0.0000, 0.0000,-0.0872, 0.0000],
[ 0.0000, 0.0000, 0.0000, 2.3298],
[ 0.0000, 0.0000, 0.0000, 0.0000]])
Get the k-th diagonal of a given matrix::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-0.4264, 0.0255,-0.1064],
[ 0.8795,-0.2429, 0.1374],
[ 0.1029,-0.6482,-1.6300]])
>>> torch.diag(a, 0)
tensor([-0.4264,-0.2429,-1.6300])
>>> torch.diag(a, 1)
tensor([ 0.0255, 0.1374])
""".format(
**common_args
),
)
add_docstr(
torch.diag_embed,
r"""
diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor
Creates a tensor whose diagonals of certain 2D planes (specified by
:attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
To facilitate creating batched diagonal matrices, the 2D planes formed by
the last two dimensions of the returned tensor are chosen by default.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
The size of the new matrix is calculated so that the specified diagonal
has the length of the last dimension of :attr:`input`.
Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
and :attr:`dim2` matters. Exchanging them is equivalent to changing the
sign of :attr:`offset`.
Applying :meth:`torch.diagonal` to the output of this function with
the same arguments yields a matrix identical to input. However,
:meth:`torch.diagonal` has different default dimensions, so those
need to be explicitly specified.
Args:
{input} Must be at least 1-dimensional.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: -2.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: -1.
Example::
>>> a = torch.randn(2, 3)
>>> torch.diag_embed(a)
tensor([[[ 1.5410, 0.0000, 0.0000],
[ 0.0000, -0.2934, 0.0000],
[ 0.0000, 0.0000, -2.1788]],
[[ 0.5684, 0.0000, 0.0000],
[ 0.0000, -1.0845, 0.0000],
[ 0.0000, 0.0000, -1.3986]]])
>>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000],
[ 0.0000, 0.5684, 0.0000, 0.0000]],
[[ 0.0000, 0.0000, -0.2934, 0.0000],
[ 0.0000, 0.0000, -1.0845, 0.0000]],
[[ 0.0000, 0.0000, 0.0000, -2.1788],
[ 0.0000, 0.0000, 0.0000, -1.3986]],
[[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000]]])
""".format(
**common_args
),
)
add_docstr(
torch.diagflat,
r"""
diagflat(input, offset=0) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a tensor with more than one dimension, then returns a
2-D tensor with diagonal elements equal to a flattened :attr:`input`.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Args:
{input}
offset (int, optional): the diagonal to consider. Default: 0 (main
diagonal).
Examples::
>>> a = torch.randn(3)
>>> a
tensor([-0.2956, -0.9068, 0.1695])
>>> torch.diagflat(a)
tensor([[-0.2956, 0.0000, 0.0000],
[ 0.0000, -0.9068, 0.0000],
[ 0.0000, 0.0000, 0.1695]])
>>> torch.diagflat(a, 1)
tensor([[ 0.0000, -0.2956, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.9068, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.1695],
[ 0.0000, 0.0000, 0.0000, 0.0000]])
>>> a = torch.randn(2, 2)
>>> a
tensor([[ 0.2094, -0.3018],
[-0.1516, 1.9342]])
>>> torch.diagflat(a)
tensor([[ 0.2094, 0.0000, 0.0000, 0.0000],
[ 0.0000, -0.3018, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.1516, 0.0000],
[ 0.0000, 0.0000, 0.0000, 1.9342]])
""".format(
**common_args
),
)
add_docstr(
torch.diagonal,
r"""
diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
Returns a partial view of :attr:`input` with its diagonal elements
with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
at the end of the shape.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Applying :meth:`torch.diag_embed` to the output of this function with
the same arguments yields a diagonal matrix with the diagonal entries
of the input. However, :meth:`torch.diag_embed` has different default
dimensions, so those need to be explicitly specified.
Args:
{input} Must be at least 2-dimensional.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: 0.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: 1.
.. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.
Examples::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0854, 1.1431, -0.1752],
[ 0.8536, -0.0905, 0.0360],
[ 0.6927, -0.3735, -0.4945]])
>>> torch.diagonal(a, 0)
tensor([-1.0854, -0.0905, -0.4945])
>>> torch.diagonal(a, 1)
tensor([ 1.1431, 0.0360])
>>> x = torch.randn(2, 5, 4, 2)
>>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
[-1.1065, 1.0401, -0.2235, -0.7938]],
[[-1.7325, -0.3081, 0.6166, 0.2335],
[ 1.0500, 0.7336, -0.3836, -1.1015]]])
""".format(
**common_args
),
)
add_docstr(
torch.diagonal_scatter,
r"""
diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` along
the diagonal elements of :attr:`input`, with respect to :attr:`dim1`
and :attr:`dim2`.
This function returns a tensor with fresh storage; it does not
return a view.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Args:
{input} Must be at least 2-dimensional.
src (Tensor): the tensor to embed into :attr:`input`.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: 0.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: 1.
.. note::
:attr:`src` must be of the proper size in order to be embedded
into :attr:`input`. Specifically, it should have the same shape as
``torch.diagonal(input, offset, dim1, dim2)``
Examples::
>>> a = torch.zeros(3, 3)
>>> a
tensor([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]])
>>> torch.diagonal_scatter(a, torch.ones(3), 0)
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> torch.diagonal_scatter(a, torch.ones(2), 1)
tensor([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
""".format(
**common_args
),
)
add_docstr(
torch.as_strided_scatter,
r"""
as_strided_scatter(input, src, size, stride, storage_offset=0) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` along
the elements corresponding to the result of calling
input.as_strided(size, stride, storage_offset).
This function returns a tensor with fresh storage; it does not
return a view.
Args:
{input}
src (Tensor): the tensor to embed into :attr:`input`
size (tuple of ints): the shape of the output tensor
stride (tuple of ints): the stride of the output tensor
storage_offset (int, optional): the offset in the underlying storage of the output tensor
.. note::
:attr:`src` must be of the proper size in order to be embedded
into :attr:`input`. Specifically, it should have the same shape as
`torch.as_strided(input, size, stride, storage_offset)`
Example::
>>> a = torch.arange(4).reshape(2, 2) + 1
>>> a
tensor([[1, 2],
[3, 4]])
>>> b = torch.zeros(3, 3)
>>> b
tensor([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]])
>>> torch.as_strided_scatter(b, a, (2, 2), (1, 2))
tensor([[1., 3., 2.],
[4., 0., 0.],
[0., 0., 0.]])
""".format(
**common_args
),
)
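# Illustrative sketch: the scattered values land exactly where
# torch.as_strided(input, size, stride, storage_offset) would read them, mirroring the
# note above. The helper name _as_strided_scatter_sketch is ours, not a torch API.
def _as_strided_scatter_sketch():
    import torch
    b = torch.zeros(3, 3)
    src = torch.arange(4.0).reshape(2, 2) + 1.0
    out = torch.as_strided_scatter(b, src, (2, 2), (1, 2))
    # Reading the same strided window back recovers src exactly.
    assert torch.equal(torch.as_strided(out, (2, 2), (1, 2)), src)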
add_docstr(
torch.diff,
r"""
diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor
Computes the n-th forward difference along the given dimension.
The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order
differences are calculated by using :func:`torch.diff` recursively.
Args:
input (Tensor): the tensor to compute the differences on
n (int, optional): the number of times to recursively compute the difference
dim (int, optional): the dimension to compute the difference along.
Default is the last dimension.
prepend, append (Tensor, optional): values to prepend or append to
:attr:`input` along :attr:`dim` before computing the difference.
Their dimensions must be equivalent to that of input, and their shapes
must match input's shape except on :attr:`dim`.
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 3, 2])
>>> torch.diff(a)
tensor([ 2, -1])
>>> b = torch.tensor([4, 5])
>>> torch.diff(a, append=b)
tensor([ 2, -1, 2, 1])
>>> c = torch.tensor([[1, 2, 3], [3, 4, 5]])
>>> torch.diff(c, dim=0)
tensor([[2, 2, 2]])
>>> torch.diff(c, dim=1)
tensor([[1, 1],
[1, 1]])
""".format(
**common_args
),
)
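# Illustrative sketch: a second-order difference (n=2) is torch.diff applied twice,
# matching the recursive definition above. The helper name _diff_sketch is ours.
def _diff_sketch():
    import torch
    a = torch.tensor([1, 3, 2, 7])
    first = torch.diff(a)                     # tensor([ 2, -1,  5])
    second = torch.diff(a, n=2)               # tensor([-3,  6])
    assert torch.equal(second, torch.diff(first))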
add_docstr(
torch.digamma,
r"""
digamma(input, *, out=None) -> Tensor
Alias for :func:`torch.special.digamma`.
""",
)
add_docstr(
torch.dist,
r"""
dist(input, other, p=2) -> Tensor
Returns the p-norm of (:attr:`input` - :attr:`other`)
The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
{input}
other (Tensor): the right-hand-side input tensor
p (float, optional): the norm to be computed
Example::
>>> x = torch.randn(4)
>>> x
tensor([-1.5393, -0.8675, 0.5916, 1.6321])
>>> y = torch.randn(4)
>>> y
tensor([ 0.0967, -1.0511, 0.6295, 0.8360])
>>> torch.dist(x, y, 3.5)
tensor(1.6727)
>>> torch.dist(x, y, 3)
tensor(1.6973)
>>> torch.dist(x, y, 0)
tensor(4.)
>>> torch.dist(x, y, 1)
tensor(2.6537)
""".format(
**common_args
),
)
add_docstr(
torch.div,
r"""
div(input, other, *, rounding_mode=None, out=None) -> Tensor
Divides each element of the input :attr:`input` by the corresponding element of
:attr:`other`.
.. math::
\text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}_i}}
.. note::
By default, this performs a "true" division like Python 3.
See the :attr:`rounding_mode` argument for floor division.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Always promotes integer types to the default scalar type.
Args:
input (Tensor): the dividend
other (Tensor or Number): the divisor
Keyword args:
rounding_mode (str, optional): Type of rounding applied to the result:
* None - default behavior. Performs no rounding and, if both :attr:`input` and
:attr:`other` are integer types, promotes the inputs to the default scalar type.
Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``.
* ``"trunc"`` - rounds the results of the division towards zero.
Equivalent to C-style integer division.
* ``"floor"`` - rounds the results of the division down.
Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``.
{out}
Examples::
>>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
>>> torch.div(x, 0.5)
tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274])
>>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
... [ 0.1815, -1.0111, 0.9805, -1.5923],
... [ 0.1062, 1.4581, 0.7759, -1.2344],
... [-0.1830, -0.0313, 1.1908, -1.4757]])
>>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
>>> torch.div(a, b)
tensor([[-0.4620, -6.6051, 0.5676, 1.2639],
[ 0.2260, -3.4509, -1.2086, 6.8990],
[ 0.1322, 4.9764, -0.9564, 5.3484],
[-0.2278, -0.1068, -1.4678, 6.3938]])
>>> torch.div(a, b, rounding_mode='trunc')
tensor([[-0., -6., 0., 1.],
[ 0., -3., -1., 6.],
[ 0., 4., -0., 5.],
[-0., -0., -1., 6.]])
>>> torch.div(a, b, rounding_mode='floor')
tensor([[-1., -7., 0., 1.],
[ 0., -4., -2., 6.],
[ 0., 4., -1., 5.],
[-1., -1., -2., 6.]])
""".format(
**common_args
),
)
add_docstr(
torch.divide,
r"""
divide(input, other, *, rounding_mode=None, out=None) -> Tensor
Alias for :func:`torch.div`.
""",
)
add_docstr(
torch.dot,
r"""
dot(input, other, *, out=None) -> Tensor
Computes the dot product of two 1D tensors.
.. note::
Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product
of two 1D tensors with the same number of elements.
Args:
input (Tensor): first tensor in the dot product, must be 1D.
other (Tensor): second tensor in the dot product, must be 1D.
Keyword args:
{out}
Example::
>>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
tensor(7)
""".format(
**common_args
),
)
add_docstr(
torch.vdot,
r"""
vdot(input, other, *, out=None) -> Tensor
Computes the dot product of two 1D vectors along a dimension.
In symbols, this function computes
.. math::
\sum_{i=1}^n \overline{x_i}y_i.
where :math:`\overline{x_i}` denotes the conjugate for complex
vectors, and it is the identity for real vectors.
.. note::
Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product
of two 1D tensors with the same number of elements.
.. seealso::
:func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension.
Args:
input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex.
other (Tensor): second tensor in the dot product, must be 1D.
Keyword args:
"""
+ rf"""
.. note:: {common_args["out"]}
"""
+ r"""
Example::
>>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1]))
tensor(7)
>>> a = torch.tensor((1 + 2j, 3 - 1j))
>>> b = torch.tensor((2 + 1j, 4 - 0j))
>>> torch.vdot(a, b)
tensor([16.+1.j])
>>> torch.vdot(b, a)
tensor([16.-1.j])
""",
)
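# Illustrative sketch: unlike torch.dot, torch.vdot conjugates its first argument, so
# swapping the operands conjugates the result for complex inputs.
# The helper name _vdot_conjugation_sketch is ours, not a torch API.
def _vdot_conjugation_sketch():
    import torch
    a = torch.tensor([1 + 2j, 3 - 1j])
    b = torch.tensor([2 + 1j, 4 + 0j])
    assert torch.allclose(torch.vdot(a, b), torch.vdot(b, a).conj())
    # torch.dot applies no conjugation, so it differs from vdot on complex inputs.
    assert not torch.allclose(torch.dot(a, b), torch.vdot(a, b))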
add_docstr(
torch.eig,
r"""
eig(input, eigenvectors=False, *, out=None) -> (Tensor, Tensor)
Computes the eigenvalues and eigenvectors of a real square matrix.
.. note::
Since eigenvalues and eigenvectors might be complex, backward pass is supported only
if eigenvalues and eigenvectors are all real valued.
When :attr:`input` is on CUDA, :func:`torch.eig() <torch.eig>` causes
host-device synchronization.
.. warning::
:func:`torch.eig` is deprecated in favor of :func:`torch.linalg.eig`
and will be removed in a future PyTorch release.
:func:`torch.linalg.eig` returns complex tensors of dtype `cfloat` or `cdouble`
rather than real tensors mimicking complex tensors.
``L, _ = torch.eig(A)`` should be replaced with
.. code :: python
L_complex = torch.linalg.eigvals(A)
``L, V = torch.eig(A, eigenvectors=True)`` should be replaced with
.. code :: python
L_complex, V_complex = torch.linalg.eig(A)
Args:
input (Tensor): the square matrix of shape :math:`(n \times n)` for which the eigenvalues and eigenvectors
will be computed
eigenvectors (bool): ``True`` to compute both eigenvalues and eigenvectors;
otherwise, only eigenvalues will be computed
Keyword args:
out (tuple, optional): the output tensors
Returns:
(Tensor, Tensor): A namedtuple (eigenvalues, eigenvectors) containing
- **eigenvalues** (*Tensor*): Shape :math:`(n \times 2)`. Each row is an eigenvalue of ``input``,
where the first element is the real part and the second element is the imaginary part.
The eigenvalues are not necessarily ordered.
- **eigenvectors** (*Tensor*): If ``eigenvectors=False``, it's an empty tensor.
Otherwise, this tensor of shape :math:`(n \times n)` can be used to compute normalized (unit length)
eigenvectors of corresponding eigenvalues as follows.
If the corresponding `eigenvalues[j]` is a real number, column `eigenvectors[:, j]` is the eigenvector
corresponding to `eigenvalues[j]`.
If the corresponding `eigenvalues[j]` and `eigenvalues[j + 1]` form a complex conjugate pair, then the
true eigenvectors can be computed as
:math:`\text{true eigenvector}[j] = eigenvectors[:, j] + i \times eigenvectors[:, j + 1]`,
:math:`\text{true eigenvector}[j + 1] = eigenvectors[:, j] - i \times eigenvectors[:, j + 1]`.
Example::
Trivial example with a diagonal matrix. By default, only eigenvalues are computed:
>>> a = torch.diag(torch.tensor([1, 2, 3], dtype=torch.double))
>>> e, v = torch.eig(a)
>>> e
tensor([[1., 0.],
[2., 0.],
[3., 0.]], dtype=torch.float64)
>>> v
tensor([], dtype=torch.float64)
Compute also the eigenvectors:
>>> e, v = torch.eig(a, eigenvectors=True)
>>> e
tensor([[1., 0.],
[2., 0.],
[3., 0.]], dtype=torch.float64)
>>> v
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=torch.float64)
""",
)
add_docstr(
torch.eq,
r"""
eq(input, other, *, out=None) -> Tensor
Computes element-wise equality
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
Example::
>>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ True, False],
[False, True]])
""".format(
**common_args
),
)
add_docstr(
torch.equal,
r"""
equal(input, other) -> bool
``True`` if two tensors have the same size and elements, ``False`` otherwise.
Example::
>>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
True
""",
)
add_docstr(
torch.erf,
r"""
erf(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erf`.
""",
)
add_docstr(
torch.erfc,
r"""
erfc(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erfc`.
""",
)
add_docstr(
torch.erfinv,
r"""
erfinv(input, *, out=None) -> Tensor
Alias for :func:`torch.special.erfinv`.
""",
)
add_docstr(
torch.exp,
r"""
exp(input, *, out=None) -> Tensor
Returns a new tensor with the exponential of the elements
of the input tensor :attr:`input`.
.. math::
y_{i} = e^{x_{i}}
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.exp(torch.tensor([0, math.log(2.)]))
tensor([ 1., 2.])
""".format(
**common_args
),
)
add_docstr(
torch.exp2,
r"""
exp2(input, *, out=None) -> Tensor
Alias for :func:`torch.special.exp2`.
""",
)
add_docstr(
torch.expm1,
r"""
expm1(input, *, out=None) -> Tensor
Alias for :func:`torch.special.expm1`.
""",
)
add_docstr(
torch.eye,
r"""
eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
Args:
n (int): the number of rows
m (int, optional): the number of columns with default being :attr:`n`
Keyword arguments:
{out}
{dtype}
{layout}
{device}
{requires_grad}
Returns:
Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
Example::
>>> torch.eye(3)
tensor([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
""".format(
**factory_common_args
),
)
add_docstr(
torch.floor,
r"""
floor(input, *, out=None) -> Tensor
Returns a new tensor with the floor of the elements of :attr:`input`,
the largest integer less than or equal to each element.
.. math::
\text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.8166, 1.5308, -0.2530, -0.2091])
>>> torch.floor(a)
tensor([-1., 1., -1., -1.])
""".format(
**common_args
),
)
add_docstr(
torch.floor_divide,
r"""
floor_divide(input, other, *, out=None) -> Tensor
.. note::
Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed
truncation division. To restore the previous behavior use
:func:`torch.div` with ``rounding_mode='trunc'``.
Computes :attr:`input` divided by :attr:`other`, elementwise, and floors
the result.
.. math::
\text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right)
"""
+ r"""
Supports broadcasting to a common shape, type promotion, and integer and float inputs.
Args:
input (Tensor or Number): the dividend
other (Tensor or Number): the divisor
Keyword args:
{out}
Example::
>>> a = torch.tensor([4.0, 3.0])
>>> b = torch.tensor([2.0, 2.0])
>>> torch.floor_divide(a, b)
tensor([2.0, 1.0])
>>> torch.floor_divide(a, 1.4)
tensor([2.0, 2.0])
""".format(
**common_args
),
)
add_docstr(
torch.fmod,
r"""
fmod(input, other, *, out=None) -> Tensor
Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
The result has the same sign as the dividend :attr:`input` and its absolute value
is less than that of :attr:`other`.
This function may be defined in terms of :func:`torch.div` as
.. code:: python
torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
.. note::
When the divisor is zero, returns ``NaN`` for floating point dtypes
on both CPU and GPU; raises ``RuntimeError`` for integer division by
zero on CPU; integer division by zero on GPU may return any value.
.. note::
Complex inputs are not supported. In some cases, it is not mathematically
possible to satisfy the definition of a modulo operation with complex numbers.
.. seealso::
:func:`torch.remainder` which implements Python's modulus operator.
This one is defined using division rounding down the result.
Args:
input (Tensor): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
{out}
Example::
>>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([-1., -0., -1., 1., 0., 1.])
>>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
""".format(
**common_args
),
)
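# Illustrative sketch: checks the torch.div-based identity quoted in the fmod docstring
# above, and contrasts fmod (sign follows the dividend) with torch.remainder (sign
# follows the divisor). The helper name _fmod_identity_sketch is ours.
def _fmod_identity_sketch():
    import torch
    a = torch.tensor([-3.0, -2.0, -1.0, 1.0, 2.0, 3.0])
    b = torch.tensor(2.0)
    fmod = torch.fmod(a, b)
    identity = a - a.div(b, rounding_mode="trunc") * b
    assert torch.allclose(fmod, identity)
    # remainder rounds the quotient down instead, so negative dividends give different results.
    assert not torch.equal(torch.remainder(a, b), fmod)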
add_docstr(
torch.frac,
r"""
frac(input, *, out=None) -> Tensor
Computes the fractional portion of each element in :attr:`input`.
.. math::
\text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})
Example::
>>> torch.frac(torch.tensor([1, 2.5, -3.2]))
tensor([ 0.0000, 0.5000, -0.2000])
""",
)
add_docstr(
torch.frexp,
r"""
frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent)
Decomposes :attr:`input` into mantissa and exponent tensors
such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`.
The range of mantissa is the open interval (-1, 1).
Supports float inputs.
Args:
input (Tensor): the input tensor
Keyword args:
out (tuple, optional): the output tensors
Example::
>>> x = torch.arange(9.)
>>> mantissa, exponent = torch.frexp(x)
>>> mantissa
tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000])
>>> exponent
tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32)
>>> torch.ldexp(mantissa, exponent)
tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.])
""",
)
add_docstr(
torch.from_numpy,
r"""
from_numpy(ndarray) -> Tensor
Creates a :class:`Tensor` from a :class:`numpy.ndarray`.
The returned tensor and :attr:`ndarray` share the same memory. Modifications to
the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
tensor is not resizable.
It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
and ``numpy.bool``.
.. warning::
Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.
Example::
>>> a = numpy.array([1, 2, 3])
>>> t = torch.from_numpy(a)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
""",
)
add_docstr(
torch.frombuffer,
r"""
frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor
Creates a 1-dimensional :class:`Tensor` from an object that implements
the Python buffer protocol.
Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
elements.
Note that either of the following must be true:
1. :attr:`count` is a positive non-zero number, and the total number of bytes
in the buffer is at least :attr:`offset` plus :attr:`count` times the size
(in bytes) of :attr:`dtype`.
2. :attr:`count` is negative, and the length (number of bytes) of the buffer
subtracted by the :attr:`offset` is a multiple of the size (in bytes) of
:attr:`dtype`.
The returned tensor and buffer share the same memory. Modifications to
the tensor will be reflected in the buffer and vice versa. The returned
tensor is not resizable.
.. note::
This function increments the reference count for the object that
owns the shared memory. Therefore, such memory will not be deallocated
before the returned tensor goes out of scope.
.. warning::
This function's behavior is undefined when passed an object implementing
the buffer protocol whose data is not on the CPU. Doing so is likely to
cause a segmentation fault.
.. warning::
This function does not try to infer the :attr:`dtype` (hence, it is not
optional). Passing a different :attr:`dtype` than its source may result
in unexpected behavior.
Args:
buffer (object): a Python object that exposes the buffer interface.
Keyword args:
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
count (int, optional): the number of desired elements to be read.
If negative, all the elements (until the end of the buffer) will be
read. Default: -1.
offset (int, optional): the number of bytes to skip at the start of
the buffer. Default: 0.
{requires_grad}
Example::
>>> import array
>>> a = array.array('i', [1, 2, 3])
>>> t = torch.frombuffer(a, dtype=torch.int32)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
>>> # Interprets the signed char bytes as 32-bit integers.
>>> # Each 4 signed char elements will be interpreted as
>>> # 1 signed 32-bit integer.
>>> import array
>>> a = array.array('b', [-1, 0, 0, 0])
>>> torch.frombuffer(a, dtype=torch.int32)
tensor([255], dtype=torch.int32)
""".format(
**factory_common_args
),
)
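# Illustrative sketch of the count/offset arithmetic described above: skipping `offset`
# bytes and reading `count` int32 elements needs at least offset + count * 4 bytes in
# the buffer (assuming array type 'i' is 4 bytes, as on common platforms).
# The helper name _frombuffer_sketch is ours, not a torch API.
def _frombuffer_sketch():
    import array
    import torch
    buf = array.array("i", [10, 20, 30, 40])  # 16 bytes of int32 data
    t = torch.frombuffer(buf, dtype=torch.int32, count=2, offset=4)
    assert t.tolist() == [20, 30]             # the first element was skipped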
add_docstr(
torch.flatten,
r"""
flatten(input, start_dim=0, end_dim=-1) -> Tensor
Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
The order of elements in :attr:`input` is unchanged.
Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
.. note::
Flattening a zero-dimensional tensor will return a one-dimensional view.
Args:
{input}
start_dim (int): the first dim to flatten
end_dim (int): the last dim to flatten
Example::
>>> t = torch.tensor([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> torch.flatten(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
>>> torch.flatten(t, start_dim=1)
tensor([[1, 2, 3, 4],
[5, 6, 7, 8]])
""".format(
**common_args
),
)
add_docstr(
torch.unflatten,
r"""
unflatten(input, dim, sizes) -> Tensor
Expands a dimension of the input tensor over multiple dimensions.
.. seealso::
:func:`torch.flatten` the inverse of this function. It coalesces several dimensions into one.
Args:
{input}
dim (int): Dimension to be unflattened, specified as an index into
``input.shape``.
sizes (Tuple[int]): New shape of the unflattened dimension.
One of its elements can be `-1` in which case the corresponding output
dimension is inferred. Otherwise, the product of ``sizes`` *must*
equal ``input.shape[dim]``.
Returns:
A View of input with the specified dimension unflattened.
Examples::
>>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
torch.Size([3, 2, 2, 1])
>>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
torch.Size([3, 2, 2, 1])
>>> torch.unflatten(torch.randn(5, 12, 3), -1, (2, 2, 3, 1, 1)).shape
torch.Size([5, 2, 2, 3, 1, 1, 3])
""".format(
**common_args
),
)
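# Illustrative sketch: flatten and unflatten are inverses when the given sizes multiply
# back to the flattened length, as noted above.
# The helper name _flatten_roundtrip_sketch is ours, not a torch API.
def _flatten_roundtrip_sketch():
    import torch
    t = torch.arange(24).reshape(2, 3, 4)
    flat = torch.flatten(t, start_dim=1)         # shape (2, 12)
    restored = torch.unflatten(flat, 1, (3, 4))  # back to shape (2, 3, 4)
    assert torch.equal(restored, t)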
add_docstr(
torch.gather,
r"""
gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor
Gathers values along an axis specified by `dim`.
For a 3-D tensor the output is specified by::
out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
:attr:`input` and :attr:`index` must have the same number of dimensions.
It is also required that ``index.size(d) <= input.size(d)`` for all
dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`.
Note that ``input`` and ``index`` do not broadcast against each other.
Args:
input (Tensor): the source tensor
dim (int): the axis along which to index
index (LongTensor): the indices of elements to gather
Keyword arguments:
sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
out (Tensor, optional): the destination tensor
Example::
>>> t = torch.tensor([[1, 2], [3, 4]])
>>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
tensor([[ 1, 1],
[ 4, 3]])
""",
)
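# Illustrative sketch: reproduces the indexing rule quoted above for a 2-D tensor and
# dim=0, i.e. out[i][j] = input[index[i][j]][j].
# The helper name _gather_rule_sketch is ours, not a torch API.
def _gather_rule_sketch():
    import torch
    inp = torch.tensor([[1, 2], [3, 4]])
    index = torch.tensor([[0, 1], [1, 0]])
    out = torch.gather(inp, 0, index)
    manual = torch.empty_like(inp)
    for i in range(index.size(0)):
        for j in range(index.size(1)):
            manual[i][j] = inp[index[i][j]][j]
    assert torch.equal(out, manual)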
add_docstr(
torch.gcd,
r"""
gcd(input, other, *, out=None) -> Tensor
Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.
Both :attr:`input` and :attr:`other` must have integer types.
.. note::
This defines :math:`gcd(0, 0) = 0`.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([5, 10, 15])
>>> b = torch.tensor([3, 4, 5])
>>> torch.gcd(a, b)
tensor([1, 2, 5])
>>> c = torch.tensor([3])
>>> torch.gcd(a, c)
tensor([1, 1, 3])
""".format(
**common_args
),
)
add_docstr(
torch.ge,
r"""
ge(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \geq \text{other}` element-wise.
"""
+ r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere
Example::
>>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[True, True], [False, True]])
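>>> # The second argument may also be a number:
>>> torch.ge(torch.tensor([1, 2, 3]), 2)
tensor([False, True, True])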
""".format(
**common_args
),
)
add_docstr(
torch.greater_equal,
r"""
greater_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ge`.
""",
)
add_docstr(
torch.gradient,
r"""
gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
one or more dimensions using the `second-order accurate central differences method
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3)\ == input[1, 2, 3]`.
When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
is estimated using `Taylor’s theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point and :math:`x+h_r` be a point neighboring it, the value
:math:`f(x+h_r)` is estimated using:
.. math::
\begin{aligned}
f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(x_r)}{6} \\
\end{aligned}
where :math:`x_r` is a number in the interval :math:`[x, x+h_r]`. Using this expansion, the analogous one for
:math:`f(x-h_l)`, and the fact that :math:`f \in C^3`, we derive:
.. math::
f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
+ ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
.. note::
We estimate the gradient of functions in complex domain
:math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
The value of each partial derivative at the boundary points is computed differently. See edge_order below.
Args:
input (``Tensor``): the tensor that represents the values of the function
Keyword args:
spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
the coordinates are (t0[1], t1[2], t2[3])
dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
the :attr:`spacing` argument must correspond with the specified dims.
edge_order (``int``, optional): 1 or 2, for `first-order
<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
`second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
estimation of the boundary ("edge") values, respectively.
Examples::
>>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
>>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
>>> values = torch.tensor([4., 1., 1., 16.], )
>>> torch.gradient(values, spacing = coordinates)
(tensor([-3., -2., 2., 5.]),)
>>> # Estimates the gradient of the R^2 -> R function whose samples are
>>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
>>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
>>> # partial derivative for both dimensions.
>>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
>>> torch.gradient(t)
(tensor([[ 9., 18., 36., 72.],
[ 9., 18., 36., 72.]]),
tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]))
>>> # A scalar value for spacing modifies the relationship between tensor indices
>>> # and input coordinates by multiplying the indices to find the
>>> # coordinates. For example, below the indices of the innermost
>>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
>>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
[ 5.0000, 7.5000, 15.0000, 20.0000]]))
>>> # doubling the spacing between samples halves the estimated partial gradients.
>>>
>>> # Estimates only the partial derivative for dimension 1
>>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
(tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
[10.0000, 15.0000, 30.0000, 40.0000]]),)
>>> # When spacing is a list of scalars, the relationship between the tensor
>>> # indices and input coordinates changes based on dimension.
>>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
>>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
>>> # 0, 1 translate to coordinates of [0, 2].
>>> torch.gradient(t, spacing = [3., 2.])
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
>>> # The following example is a replication of the previous one with explicit
>>> # coordinates.
>>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
>>> torch.gradient(t, spacing = coords)
(tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
[ 4.5000, 9.0000, 18.0000, 36.0000]]),
tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
[ 3.3333, 5.0000, 10.0000, 13.3333]]))
""",
)
add_docstr(
torch.geqrf,
r"""
geqrf(input, *, out=None) -> (Tensor, Tensor)
This is a low-level function for calling LAPACK's geqrf directly. This function
returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ .
Computes a QR decomposition of :attr:`input`.
Both `Q` and `R` matrices are stored in the same output tensor `a`.
The elements of `R` are stored on and above the diagonal.
Elementary reflectors (or Householder vectors) implicitly defining matrix `Q`
are stored below the diagonal.
The results of this function can be used together with :func:`torch.linalg.householder_product`
to obtain the `Q` matrix or
with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix,
for an efficient matrix-matrix multiplication.
See `LAPACK documentation for geqrf`_ for further details.
.. note::
See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq`
with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition.
Args:
input (Tensor): the input matrix
Keyword args:
out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`.
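Example::
>>> # Illustrative sketch (random input): recover Q with householder_product and R from
>>> # the upper triangle of `a`, then check that the product reconstructs A.
>>> A = torch.randn(3, 3)
>>> a, tau = torch.geqrf(A)
>>> Q = torch.linalg.householder_product(a, tau)
>>> R = a.triu()
>>> torch.allclose(Q @ R, A, atol=1e-6)
True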
.. _LAPACK documentation for geqrf:
http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html
""",
)
add_docstr(
torch.inner,
r"""
inner(input, other, *, out=None) -> Tensor
Computes the dot product for 1D tensors. For higher dimensions, sums the product
of elements from :attr:`input` and :attr:`other` along their last dimension.
.. note::
If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent
to `torch.mul(input, other)`.
If both :attr:`input` and :attr:`other` are non-scalars, the size of their last
dimension must match and the result is equivalent to `torch.tensordot(input,
other, dims=([-1], [-1]))`
Args:
input (Tensor): First input tensor
other (Tensor): Second input tensor
Keyword args:
out (Tensor, optional): Optional output tensor to write result into. The output
shape is `input.shape[:-1] + other.shape[:-1]`.
Example::
# Dot product
>>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1]))
tensor(7)
# Multidimensional input tensors
>>> a = torch.randn(2, 3)
>>> a
tensor([[0.8173, 1.0874, 1.1784],
[0.3279, 0.1234, 2.7894]])
>>> b = torch.randn(2, 4, 3)
>>> b
tensor([[[-0.4682, -0.7159, 0.1506],
[ 0.4034, -0.3657, 1.0387],
[ 0.9892, -0.6684, 0.1774],
[ 0.9482, 1.3261, 0.3917]],
[[ 0.4537, 0.7493, 1.1724],
[ 0.2291, 0.5749, -0.2267],
[-0.7920, 0.3607, -0.3701],
[ 1.3666, -0.5850, -1.7242]]])
>>> torch.inner(a, b)
tensor([[[-0.9837, 1.1560, 0.2907, 2.6785],
[ 2.5671, 0.5452, -0.6912, -1.5509]],
[[ 0.1782, 2.9843, 0.7366, 1.5672],
[ 3.5115, -0.4864, -1.2476, -4.4337]]])
# Scalar input
>>> torch.inner(a, torch.tensor(2))
tensor([[1.6347, 2.1748, 2.3567],
[0.6558, 0.2469, 5.5787]])
""",
)
add_docstr(
torch.outer,
r"""
outer(input, vec2, *, out=None) -> Tensor
Outer product of :attr:`input` and :attr:`vec2`.
If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Args:
input (Tensor): 1-D input vector
vec2 (Tensor): 1-D input vector
Keyword args:
out (Tensor, optional): optional output matrix
Example::
>>> v1 = torch.arange(1., 5.)
>>> v2 = torch.arange(1., 4.)
>>> torch.outer(v1, v2)
tensor([[ 1., 2., 3.],
[ 2., 4., 6.],
[ 3., 6., 9.],
[ 4., 8., 12.]])
""",
)
add_docstr(
torch.ger,
r"""
ger(input, vec2, *, out=None) -> Tensor
Alias of :func:`torch.outer`.
.. warning::
This function is deprecated and will be removed in a future PyTorch release.
Use :func:`torch.outer` instead.
""",
)
add_docstr(
torch.get_default_dtype,
r"""
get_default_dtype() -> torch.dtype
Get the current default floating point :class:`torch.dtype`.
Example::
>>> torch.get_default_dtype() # initial default for floating point is torch.float32
torch.float32
>>> torch.set_default_dtype(torch.float64)
>>> torch.get_default_dtype() # default is now changed to torch.float64
torch.float64
>>> torch.set_default_tensor_type(torch.FloatTensor) # setting tensor type also affects this
>>> torch.get_default_dtype() # changed to torch.float32, the dtype for torch.FloatTensor
torch.float32
""",
)
add_docstr(
torch.get_num_threads,
r"""
get_num_threads() -> int
Returns the number of threads used for parallelizing CPU operations
""",
)
add_docstr(
torch.get_num_interop_threads,
r"""
get_num_interop_threads() -> int
Returns the number of threads used for inter-op parallelism on CPU
(e.g. in JIT interpreter)
""",
)
add_docstr(
torch.gt,
r"""
gt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} > \text{other}` element-wise.
"""
+ r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere
Example::
>>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [False, False]])
""".format(
**common_args
),
)
add_docstr(
torch.greater,
r"""
greater(input, other, *, out=None) -> Tensor
Alias for :func:`torch.gt`.
""",
)
add_docstr(
torch.histc,
r"""
histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor
Computes the histogram of a tensor.
The elements are sorted into equal width bins between :attr:`min` and
:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
maximum values of the data are used.
Elements lower than min and higher than max are ignored.
Args:
{input}
bins (int): number of histogram bins
min (Scalar): lower end of the range (inclusive)
max (Scalar): upper end of the range (inclusive)
Keyword args:
{out}
Returns:
Tensor: Histogram represented as a tensor
Example::
>>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
tensor([ 0., 2., 1., 0.])
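>>> # With the default min=0 and max=0, the data's own minimum and maximum are used:
>>> torch.histc(torch.tensor([1., 2, 1]), bins=4)
tensor([2., 0., 0., 1.])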
""".format(
**common_args
),
)
add_docstr(
torch.histogram,
r"""
histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
Computes a histogram of the values in a tensor.
:attr:`bins` can be an integer or a 1D tensor.
If :attr:`bins` is an int, it specifies the number of equal-width bins.
By default, the lower and upper range of the bins is determined by the
minimum and maximum elements of the input tensor. The :attr:`range`
argument can be provided to specify a range for the bins.
If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
including the rightmost edge. It should contain at least 2 elements
and its elements should be increasing.
Args:
{input}
bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
defines the sequence of bin edges including the rightmost edge.
Keyword args:
range (tuple of float): Defines the range of the bins.
weight (Tensor): If provided, weight should have the same shape as input. Each value in
input contributes its associated weight towards its bin's result.
density (bool): If False, the result will contain the count (or total weight) in each bin.
If True, the result is the value of the probability density function over the bins,
normalized such that the integral over the range of the bins is 1.
out (tuple, optional): The result tuple of two output tensors (hist, bin_edges).
Returns:
hist (Tensor): 1D Tensor containing the values of the histogram.
bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins.
Example::
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
(tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
>>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
(tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
""".format(
**common_args
),
)
add_docstr(
torch.histogramdd,
r"""
histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
Computes a multi-dimensional histogram of the values in a tensor.
Interprets the elements of an input tensor whose innermost dimension has size N
as a collection of N-dimensional points. Maps each of the points into a set of
N-dimensional bins and returns the number of points (or total weight) in each bin.
:attr:`input` must be a tensor with at least 2 dimensions.
If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
If input has three or more dimensions, all but the last dimension are flattened.
Each dimension is independently associated with its own strictly increasing sequence
of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
tensors. Alternatively, bin edges may be constructed automatically by passing a
sequence of integers specifying the number of equal-width bins in each dimension.
For each N-dimensional point in input:
- Each of its coordinates is binned independently among the bin edges
corresponding to its dimension
- Binning results are combined to identify the N-dimensional bin (if any)
into which the point falls
- If the point falls into a bin, the bin's count (or total weight) is incremented
- Points which do not fall into any bin do not contribute to the output
:attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
the left and right edges of all bins. Every bin is inclusive of its left edge and exclusive of its right
edge, except the rightmost bin, which also includes its right edge.
If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
in each dimension. By default, the leftmost and rightmost bin edges in each dimension
are determined by the minimum and maximum elements of the input tensor in the
corresponding dimension. The :attr:`range` argument can be provided to manually
specify the leftmost and rightmost bin edges in each dimension.
If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
.. note::
See also :func:`torch.histogram`, which specifically computes 1D histograms.
While :func:`torch.histogramdd` infers the dimensionality of its bins and
binned values from the shape of :attr:`input`, :func:`torch.histogram`
accepts and flattens :attr:`input` of any shape.
Args:
{input}
bins: Tensor[], int[], or int.
If Tensor[], defines the sequences of bin edges.
If int[], defines the number of equal-width bins in each dimension.
If int, defines the number of equal-width bins for all dimensions.
Keyword args:
range (sequence of float): Defines the leftmost and rightmost bin edges
in each dimension.
weight (Tensor): By default, each value in the input has weight 1. If a weight
tensor is passed, each N-dimensional coordinate in input
contributes its associated weight towards its bin's result.
The weight tensor should have the same shape as the :attr:`input`
tensor excluding its innermost dimension N.
density (bool): If False (default), the result will contain the count (or total weight)
in each bin. If True, each count (weight) is divided by the total count
(total weight), then divided by the volume of its associated bin.
Returns:
hist (Tensor): N-dimensional Tensor containing the values of the histogram.
bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges.
Example::
>>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
... weight=torch.tensor([1., 2., 4., 8.]))
torch.return_types.histogramdd(
hist=tensor([[0., 1., 0.],
[2., 0., 0.],
[4., 0., 8.]]),
bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
tensor([0.0000, 0.6667, 1.3333, 2.0000])))
>>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
... range=[0., 1., 0., 1.], density=True)
torch.return_types.histogramdd(
hist=tensor([[2., 0.],
[0., 2.]]),
bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
tensor([0.0000, 0.5000, 1.0000])))
""",
)
# TODO: Fix via https://github.com/pytorch/pytorch/issues/75798
torch.histogramdd.__module__ = "torch"
add_docstr(
torch.hypot,
r"""
hypot(input, other, *, out=None) -> Tensor
Given the legs of a right triangle, return its hypotenuse.
.. math::
\text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}
The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
"""
+ r"""
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
tensor([5.0000, 5.6569, 6.4031])
""".format(
**common_args
),
)
add_docstr(
torch.i0,
r"""
i0(input, *, out=None) -> Tensor
Alias for :func:`torch.special.i0`.
""",
)
add_docstr(
torch.igamma,
r"""
igamma(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.gammainc`.
""",
)
add_docstr(
torch.igammac,
r"""
igammac(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.gammaincc`.
""",
)
add_docstr(
torch.index_select,
r"""
index_select(input, dim, index, *, out=None) -> Tensor
Returns a new tensor which indexes the :attr:`input` tensor along dimension
:attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
The returned tensor has the same number of dimensions as the original tensor
(:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
of :attr:`index`; other dimensions have the same size as in the original tensor.
.. note:: The returned tensor does **not** use the same storage as the original
tensor. If :attr:`out` has a different shape than expected, we
silently change it to the correct shape, reallocating the underlying
storage if necessary.
Args:
{input}
dim (int): the dimension in which we index
index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
Keyword args:
{out}
Example::
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-0.4664, 0.2647, -0.1228, -1.1068],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> indices = torch.tensor([0, 2])
>>> torch.index_select(x, 0, indices)
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> torch.index_select(x, 1, indices)
tensor([[ 0.1427, -0.5414],
[-0.4664, -0.1228],
[-1.1734, 0.7230]])
""".format(
**common_args
),
)
add_docstr(
torch.inverse,
r"""
inverse(input, *, out=None) -> Tensor
Alias for :func:`torch.linalg.inv`
""",
)
add_docstr(
torch.isin,
r"""
isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
a boolean tensor of the same shape as :attr:`elements` that is True for elements
in :attr:`test_elements` and False otherwise.
.. note::
One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
Args:
elements (Tensor or Scalar): Input elements
test_elements (Tensor or Scalar): Values against which to test for each input element
assume_unique (bool, optional): If True, assumes both :attr:`elements` and
:attr:`test_elements` contain unique elements, which can speed up the
calculation. Default: False
invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
values for elements *not* in :attr:`test_elements`. Default: False
Returns:
A boolean tensor of the same shape as :attr:`elements` that is True for elements in
:attr:`test_elements` and False otherwise
Example::
>>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
tensor([[False, True],
[ True, False]])
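>>> # With invert=True the membership test is negated:
>>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]), invert=True)
tensor([[ True, False],
[False, True]])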
""",
)
add_docstr(
torch.isinf,
r"""
isinf(input) -> Tensor
Tests if each element of :attr:`input` is infinite
(positive or negative infinity) or not.
.. note::
Complex values are infinite when their real or imaginary part is
infinite.
Args:
{input}
Returns:
A boolean tensor that is True where :attr:`input` is infinite and False elsewhere
Example::
>>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
tensor([False, True, False, True, False])
""".format(
**common_args
),
)
add_docstr(
torch.isposinf,
r"""
isposinf(input, *, out=None) -> Tensor
Tests if each element of :attr:`input` is positive infinity or not.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
>>> torch.isposinf(a)
tensor([False, True, False])
""".format(
**common_args
),
)
add_docstr(
torch.isneginf,
r"""
isneginf(input, *, out=None) -> Tensor
Tests if each element of :attr:`input` is negative infinity or not.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
>>> torch.isneginf(a)
tensor([ True, False, False])
""".format(
**common_args
),
)
add_docstr(
torch.isclose,
r"""
isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
Returns a new tensor with boolean elements representing if each element of
:attr:`input` is "close" to the corresponding element of :attr:`other`.
Closeness is defined as:
.. math::
\lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
"""
+ r"""
where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
and/or :attr:`other` are nonfinite they are close if and only if
they are equal, with NaNs being considered equal to each other when
:attr:`equal_nan` is True.
Args:
input (Tensor): first tensor to compare
other (Tensor): second tensor to compare
atol (float, optional): absolute tolerance. Default: 1e-08
rtol (float, optional): relative tolerance. Default: 1e-05
equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
Examples::
>>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
tensor([ True, False, False])
>>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
tensor([True, True])
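>>> # NaNs compare as equal only when equal_nan=True:
>>> torch.isclose(torch.tensor([float('nan'), 1.]), torch.tensor([float('nan'), 1.]), equal_nan=True)
tensor([True, True])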
""",
)
add_docstr(
torch.isfinite,
r"""
isfinite(input) -> Tensor
Returns a new tensor with boolean elements representing if each element is `finite` or not.
Real values are finite when they are not NaN, negative infinity, or infinity.
Complex values are finite when both their real and imaginary parts are finite.
Args:
{input}
Returns:
A boolean tensor that is True where :attr:`input` is finite and False elsewhere
Example::
>>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
tensor([True, False, True, False, False])
""".format(
**common_args
),
)
add_docstr(
torch.isnan,
r"""
isnan(input) -> Tensor
Returns a new tensor with boolean elements representing if each element of :attr:`input`
is NaN or not. Complex values are considered NaN when either their real
or imaginary part is NaN.
Arguments:
{input}
Returns:
A boolean tensor that is True where :attr:`input` is NaN and False elsewhere
Example::
>>> torch.isnan(torch.tensor([1, float('nan'), 2]))
tensor([False, True, False])
""".format(
**common_args
),
)
add_docstr(
torch.isreal,
r"""
isreal(input) -> Tensor
Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not.
All real-valued types are considered real. Complex values are considered real when their imaginary part is 0.
Arguments:
{input}
Returns:
A boolean tensor that is True where :attr:`input` is real and False elsewhere
Example::
>>> torch.isreal(torch.tensor([1, 1+1j, 2+0j]))
tensor([True, False, True])
""".format(
**common_args
),
)
add_docstr(
torch.is_floating_point,
r"""
is_floating_point(input) -> (bool)
Returns True if the data type of :attr:`input` is a floating point data type i.e.,
one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``.
Args:
{input}
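Example::
>>> # A floating point tensor reports True, an integer tensor False:
>>> torch.is_floating_point(torch.tensor([1.0]))
True
>>> torch.is_floating_point(torch.tensor([1]))
False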
""".format(
**common_args
),
)
add_docstr(
torch.is_complex,
r"""
is_complex(input) -> (bool)
Returns True if the data type of :attr:`input` is a complex data type i.e.,
one of ``torch.complex64``, and ``torch.complex128``.
Args:
{input}
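Example::
>>> # A complex tensor reports True, a real-valued tensor False:
>>> torch.is_complex(torch.tensor([1 + 1j]))
True
>>> torch.is_complex(torch.tensor([1.0]))
False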
""".format(
**common_args
),
)
add_docstr(
torch.is_grad_enabled,
r"""
is_grad_enabled() -> (bool)
Returns True if grad mode is currently enabled.
""".format(
**common_args
),
)
add_docstr(
torch.is_inference_mode_enabled,
r"""
is_inference_mode_enabled() -> (bool)
Returns True if inference mode is currently enabled.
""".format(
**common_args
),
)
add_docstr(
torch.is_inference,
r"""
is_inference(input) -> (bool)
Returns True if :attr:`input` is an inference tensor.
A non-view tensor is an inference tensor if and only if it was
allocated during inference mode. A view tensor is an inference
tensor if and only if the tensor it is a view of is an inference tensor.
For details on inference mode please see
`Inference Mode <https://pytorch.org/cppdocs/notes/inference_mode.html>`_.
Args:
{input}
""".format(
**common_args
),
)
add_docstr(
torch.is_conj,
r"""
is_conj(input) -> (bool)
Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`.
Args:
{input}
""".format(
**common_args
),
)
add_docstr(
torch.is_nonzero,
r"""
is_nonzero(input) -> (bool)
Returns True if the :attr:`input` is a single element tensor which is not equal to zero
after type conversions.
i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or
``torch.tensor([False])``.
Throws a ``RuntimeError`` if ``input.numel() != 1`` (even in the case
of sparse tensors).
Args:
{input}
Examples::
>>> torch.is_nonzero(torch.tensor([0.]))
False
>>> torch.is_nonzero(torch.tensor([1.5]))
True
>>> torch.is_nonzero(torch.tensor([False]))
False
>>> torch.is_nonzero(torch.tensor([3]))
True
>>> torch.is_nonzero(torch.tensor([1, 3, 5]))
Traceback (most recent call last):
...
RuntimeError: bool value of Tensor with more than one value is ambiguous
>>> torch.is_nonzero(torch.tensor([]))
Traceback (most recent call last):
...
RuntimeError: bool value of Tensor with no values is ambiguous
""".format(
**common_args
),
)
add_docstr(
torch.kron,
r"""
kron(input, other, *, out=None) -> Tensor
Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`.
If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a
:math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a
:math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries:
.. math::
(\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} =
\text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n},
where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`.
If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions.
Supports real-valued and complex-valued inputs.
.. note::
This function generalizes the typical definition of the Kronecker product for two matrices to two tensors,
as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a
:math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix:
.. math::
\mathbf{A} \otimes \mathbf{B}=\begin{bmatrix}
a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\
\vdots & \ddots & \vdots \\
a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix}
where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`.
Arguments:
input (Tensor)
other (Tensor)
Keyword args:
out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``
Examples::
>>> mat1 = torch.eye(2)
>>> mat2 = torch.ones(2, 2)
>>> torch.kron(mat1, mat2)
tensor([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]])
>>> mat1 = torch.eye(2)
>>> mat2 = torch.arange(1, 5).reshape(2, 2)
>>> torch.kron(mat1, mat2)
tensor([[1., 2., 0., 0.],
[3., 4., 0., 0.],
[0., 0., 1., 2.],
[0., 0., 3., 4.]])
""",
)
add_docstr(
torch.kthvalue,
r"""
kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
smallest element of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`. And ``indices`` is the index location of each element found.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
are the same size as :attr:`input`, except in the dimension :attr:`dim` where
they are of size 1. Otherwise, :attr:`dim` is squeezed
(see :func:`torch.squeeze`), resulting in both the :attr:`values` and
:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
.. note::
When :attr:`input` is a CUDA tensor and there are multiple valid
:attr:`k` th values, this function may nondeterministically return
:attr:`indices` for any of them.
Args:
{input}
k (int): k for the k-th smallest element
dim (int, optional): the dimension to find the kth value along
{keepdim}
Keyword args:
out (tuple, optional): the output tuple of (Tensor, LongTensor)
can be optionally given to be used as output buffers
Example::
>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1., 2., 3., 4., 5.])
>>> torch.kthvalue(x, 4)
torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
>>> x = torch.arange(1., 7.).resize_(2, 3)
>>> x
tensor([[ 1., 2., 3.],
[ 4., 5., 6.]])
>>> torch.kthvalue(x, 2, 0, True)
torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
""".format(
**single_dim_common
),
)
add_docstr(
torch.lcm,
r"""
lcm(input, other, *, out=None) -> Tensor
Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.
Both :attr:`input` and :attr:`other` must have integer types.
.. note::
This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([5, 10, 15])
>>> b = torch.tensor([3, 4, 5])
>>> torch.lcm(a, b)
tensor([15, 20, 15])
>>> c = torch.tensor([3])
>>> torch.lcm(a, c)
tensor([15, 30, 15])
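>>> # As noted above, an lcm involving zero is defined to be 0:
>>> torch.lcm(torch.tensor([0, 5]), torch.tensor([0, 0]))
tensor([0, 0])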
""".format(
**common_args
),
)
add_docstr(
torch.ldexp,
r"""
ldexp(input, other, *, out=None) -> Tensor
Multiplies :attr:`input` by 2**:attr:`other`.
.. math::
\text{{out}}_i = \text{{input}}_i * 2^\text{{other}}_i
"""
+ r"""
Typically this function is used to construct floating point numbers by multiplying
mantissas in :attr:`input` with integral powers of two created from the exponents
in :attr:`other`.
Args:
{input}
other (Tensor): a tensor of exponents, typically integers.
Keyword args:
{out}
Example::
>>> torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
tensor([2.])
>>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
tensor([ 2., 4., 8., 16.])
""".format(
**common_args
),
)
add_docstr(
torch.le,
r"""
le(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \leq \text{other}` element-wise.
"""
+ r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or Scalar): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is less than or equal to
:attr:`other` and False elsewhere
Example::
>>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[True, False], [True, True]])
""".format(
**common_args
),
)
add_docstr(
torch.less_equal,
r"""
less_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.le`.
""",
)
add_docstr(
torch.lerp,
r"""
lerp(input, end, weight, *, out=None)
Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
.. math::
\text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
"""
+ r"""
The shapes of :attr:`start` and :attr:`end` must be
:ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the tensor with the starting points
end (Tensor): the tensor with the ending points
weight (float or tensor): the weight for the interpolation formula
Keyword args:
{out}
Example::
>>> start = torch.arange(1., 5.)
>>> end = torch.empty(4).fill_(10)
>>> start
tensor([ 1., 2., 3., 4.])
>>> end
tensor([ 10., 10., 10., 10.])
>>> torch.lerp(start, end, 0.5)
tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
>>> torch.lerp(start, end, torch.full_like(start, 0.5))
tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
""".format(
**common_args
),
)
add_docstr(
torch.lgamma,
r"""
lgamma(input, *, out=None) -> Tensor
Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
.. math::
\text{out}_{i} = \ln \Gamma(|\text{input}_{i}|)
"""
+ """
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.arange(0.5, 2, 0.5)
>>> torch.lgamma(a)
tensor([ 0.5724, 0.0000, -0.1208])
""".format(
**common_args
),
)
add_docstr(
torch.linspace,
r"""
linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :attr:`start` to :attr:`end`, inclusive. That is, the values are:
.. math::
(\text{start},
\text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
\ldots,
\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
\text{end})
"""
+ """
From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
Args:
start (float): the starting value for the set of points
end (float): the ending value for the set of points
steps (int): size of the constructed tensor
Keyword arguments:
{out}
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
{layout}
{device}
{requires_grad}
Example::
>>> torch.linspace(3, 10, steps=5)
tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
>>> torch.linspace(-10, 10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=1)
tensor([-10.])
""".format(
**factory_common_args
),
)
add_docstr(
torch.log,
r"""
log(input, *, out=None) -> Tensor
Returns a new tensor with the natural logarithm of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{e} (x_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.rand(5) * 5
>>> a
tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739])
>>> torch.log(a)
tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204])
""".format(
**common_args
),
)
add_docstr(
torch.log10,
r"""
log10(input, *, out=None) -> Tensor
Returns a new tensor with the logarithm to the base 10 of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{10} (x_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.rand(5)
>>> a
tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
>>> torch.log10(a)
tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
""".format(
**common_args
),
)
add_docstr(
torch.log1p,
r"""
log1p(input, *, out=None) -> Tensor
Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
.. math::
y_i = \log_{e} (x_i + 1)
"""
+ r"""
.. note:: This function is more accurate than :func:`torch.log` for small
values of :attr:`input`
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
>>> torch.log1p(a)
tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
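>>> # For tiny inputs, log1p keeps precision that log(1 + x) loses (float32 shown):
>>> torch.log1p(torch.tensor([1e-10]))
tensor([1.0000e-10])
>>> torch.log(torch.tensor([1.]) + 1e-10)
tensor([0.])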
""".format(
**common_args
),
)
add_docstr(
torch.log2,
r"""
log2(input, *, out=None) -> Tensor
Returns a new tensor with the logarithm to the base 2 of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{2} (x_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.rand(5)
>>> a
tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
>>> torch.log2(a)
tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
""".format(
**common_args
),
)
add_docstr(
torch.logaddexp,
r"""
logaddexp(input, other, *, out=None) -> Tensor
Logarithm of the sum of exponentiations of the inputs.
Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
in statistics where the calculated probabilities of events may be so small as to
exceed the range of normal floating point numbers. In such cases the logarithm
of the calculated probability is stored. This function allows adding
probabilities stored in such a fashion.
This op should not be confused with :func:`torch.logsumexp`, which performs a
reduction over a single tensor.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
Example::
>>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
tensor([-0.3069, -0.6867, -0.8731])
>>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
tensor([-1., -2., -3.])
>>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
""".format(
**common_args
),
)
add_docstr(
torch.logaddexp2,
r"""
logaddexp2(input, other, *, out=None) -> Tensor
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
:func:`torch.logaddexp` for more details.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
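Example::
>>> # log2(2**1 + 2**1) = 2:
>>> torch.logaddexp2(torch.tensor([1.0]), torch.tensor([1.0]))
tensor([2.])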
""".format(
**common_args
),
)
add_docstr(
torch.xlogy,
r"""
xlogy(input, other, *, out=None) -> Tensor
Alias for :func:`torch.special.xlogy`.
""",
)
add_docstr(
torch.logical_and,
r"""
logical_and(input, other, *, out=None) -> Tensor
Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
{input}
other (Tensor): the tensor to compute AND with
Keyword args:
{out}
Example::
>>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([ True, False, False])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_and(a, b)
tensor([False, False, True, False])
>>> torch.logical_and(a.double(), b.double())
tensor([False, False, True, False])
>>> torch.logical_and(a.double(), b)
tensor([False, False, True, False])
>>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([False, False, True, False])
""".format(
**common_args
),
)
add_docstr(
torch.logical_not,
r"""
logical_not(input, *, out=None) -> Tensor
Computes the element-wise logical NOT of the given input tensor. If the :attr:`out` tensor is not given, the output
tensor will have the bool dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``.
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.logical_not(torch.tensor([True, False]))
tensor([False, True])
>>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
tensor([ True, False, False])
>>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
tensor([ True, False, False])
>>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
tensor([1, 0, 0], dtype=torch.int16)
""".format(
**common_args
),
)
add_docstr(
torch.logical_or,
r"""
logical_or(input, other, *, out=None) -> Tensor
Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
{input}
other (Tensor): the tensor to compute OR with
Keyword args:
{out}
Example::
>>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([ True, False, True])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_or(a, b)
tensor([ True, True, True, False])
>>> torch.logical_or(a.double(), b.double())
tensor([ True, True, True, False])
>>> torch.logical_or(a.double(), b)
tensor([ True, True, True, False])
>>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([ True, True, True, False])
""".format(
**common_args
),
)
add_docstr(
torch.logical_xor,
r"""
logical_xor(input, other, *, out=None) -> Tensor
Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
{input}
other (Tensor): the tensor to compute XOR with
Keyword args:
{out}
Example::
>>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([False, False, True])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_xor(a, b)
tensor([ True, True, False, False])
>>> torch.logical_xor(a.double(), b.double())
tensor([ True, True, False, False])
>>> torch.logical_xor(a.double(), b)
tensor([ True, True, False, False])
>>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([ True, True, False, False])
""".format(
**common_args
),
)
add_docstr(
torch.logspace,
"""
logspace(start, end, steps, base=10.0, *, \
out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
+ r"""
Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to
:math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale
with base :attr:`base`. That is, the values are:
.. math::
(\text{base}^{\text{start}},
\text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\ldots,
\text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
\text{base}^{\text{end}})
"""
+ """
From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
Args:
start (float): the starting value for the set of points
end (float): the ending value for the set of points
steps (int): size of the constructed tensor
base (float, optional): base of the logarithm function. Default: ``10.0``.
Keyword arguments:
{out}
dtype (torch.dtype, optional): the data type to perform the computation in.
Default: if None, uses the global default dtype (see torch.get_default_dtype())
when both :attr:`start` and :attr:`end` are real,
and corresponding complex dtype when either is complex.
{layout}
{device}
{requires_grad}
Example::
>>> torch.logspace(start=-10, end=10, steps=5)
tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
>>> torch.logspace(start=0.1, end=1.0, steps=5)
tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
>>> torch.logspace(start=0.1, end=1.0, steps=1)
tensor([1.2589])
>>> torch.logspace(start=2, end=2, steps=1, base=2)
tensor([4.0])
""".format(
**factory_common_args
),
)
add_docstr(
torch.logsumexp,
r"""
logsumexp(input, dim, keepdim=False, *, out=None)
Returns the log of summed exponentials of each row of the :attr:`input`
tensor in the given dimension :attr:`dim`. The computation is numerically
stabilized.
For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
.. math::
\text{{logsumexp}}(x)_{{i}} = \log \sum_j \exp(x_{{ij}})
{keepdim_details}
Args:
{input}
{opt_dim}
{keepdim}
Keyword args:
{out}
Example::
>>> a = torch.randn(3, 3)
>>> torch.logsumexp(a, 1)
tensor([1.4907, 1.0593, 1.5696])
>>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
tensor(1.6859e-07)
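>>> # A small deterministic check: the logsumexp of three zeros is log(3):
>>> torch.logsumexp(torch.tensor([[0., 0., 0.]]), dim=1)
tensor([1.0986])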
""".format(
**multi_dim_common
),
)
add_docstr(
torch.lstsq,
r"""
lstsq(input, A, *, out=None) -> (Tensor, Tensor)
Computes the solution to the least squares and least norm problems for a full
rank matrix :math:`A` of size :math:`(m \times n)` and a matrix :math:`B` of
size :math:`(m \times k)`.
If :math:`m \geq n`, :func:`lstsq` solves the least-squares problem:
.. math::
\begin{array}{ll}
\min_X & \|AX-B\|_2.
\end{array}
If :math:`m < n`, :func:`lstsq` solves the least-norm problem:
.. math::
\begin{array}{llll}
\min_X & \|X\|_2 & \text{subject to} & AX = B.
\end{array}
Returned tensor :math:`X` has shape :math:`(\max(m, n) \times k)`. The first :math:`n`
rows of :math:`X` contains the solution. If :math:`m \geq n`, the residual sum of squares
for the solution in each column is given by the sum of squares of elements in the
remaining :math:`m - n` rows of that column.
.. warning::
:func:`torch.lstsq` is deprecated in favor of :func:`torch.linalg.lstsq`
and will be removed in a future PyTorch release. :func:`torch.linalg.lstsq`
has reversed arguments and does not return the QR decomposition in the returned tuple,
(it returns other information about the problem).
The returned `solution` in :func:`torch.lstsq` stores the residuals of the solution in the
last `m - n` rows in the case `m > n`. In :func:`torch.linalg.lstsq`, the residuals
are in the field 'residuals' of the returned named tuple.
Unpacking the solution as ``X = torch.lstsq(B, A).solution[:A.size(1)]`` should be replaced with
.. code:: python
X = torch.linalg.lstsq(A, B).solution
.. note::
The case when :math:`m < n` is not supported on the GPU.
Args:
input (Tensor): the matrix :math:`B`
A (Tensor): the :math:`m` by :math:`n` matrix :math:`A`
Keyword args:
out (tuple, optional): the optional destination tensor
Returns:
(Tensor, Tensor): A namedtuple (solution, QR) containing:
- **solution** (*Tensor*): the least squares solution
- **QR** (*Tensor*): the details of the QR factorization
.. note::
The returned matrices will always be transposed, irrespective of the strides
of the input matrices. That is, they will have stride `(1, m)` instead of
`(m, 1)`.
Example::
>>> A = torch.tensor([[1., 1, 1],
... [2, 3, 4],
... [3, 5, 2],
... [4, 2, 5],
... [5, 4, 3]])
>>> B = torch.tensor([[-10., -3],
... [ 12, 14],
... [ 14, 12],
... [ 16, 16],
... [ 18, 16]])
>>> X, _ = torch.lstsq(B, A)
>>> X
tensor([[ 2.0000, 1.0000],
[ 1.0000, 1.0000],
[ 1.0000, 2.0000],
[ 10.9635, 4.8501],
[ 8.9332, 5.2418]])
""",
)
add_docstr(
torch.lt,
r"""
lt(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} < \text{other}` element-wise.
"""
+ r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
Example::
>>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, False], [True, False]])
""".format(
**common_args
),
)
add_docstr(
torch.lu_unpack,
r"""
lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor)
Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices.
.. seealso::
:func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient
than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`.
Args:
LU_data (Tensor): the packed LU factorization data
LU_pivots (Tensor): the packed LU factorization pivots
unpack_data (bool): flag indicating if the data should be unpacked.
If ``False``, then the returned ``L`` and ``U`` are empty tensors.
Default: ``True``
unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``.
If ``False``, then the returned ``P`` is an empty tensor.
Default: ``True``
Keyword args:
out (tuple, optional): output tuple of three tensors. Ignored if `None`.
Returns:
A namedtuple ``(P, L, U)``
Examples::
>>> A = torch.randn(2, 3, 3)
>>> LU, pivots = torch.linalg.lu_factor(A)
>>> P, L, U = torch.lu_unpack(LU, pivots)
>>> # We can recover A from the factorization
>>> A_ = P @ L @ U
>>> torch.allclose(A, A_)
True
>>> # LU factorization of a rectangular matrix:
>>> A = torch.randn(2, 3, 2)
>>> LU, pivots = torch.linalg.lu_factor(A)
>>> P, L, U = torch.lu_unpack(LU, pivots)
>>> # P, L, U are the same as returned by linalg.lu
>>> P_, L_, U_ = torch.linalg.lu(A)
>>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_)
True
""".format(
**common_args
),
)
add_docstr(
torch.less,
r"""
less(input, other, *, out=None) -> Tensor
Alias for :func:`torch.lt`.
""",
)
add_docstr(
torch.lu_solve,
r"""
lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor
Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted
LU factorization of A from :func:`~linalg.lu_factor`.
This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`.
.. warning::
:func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`.
:func:`torch.lu_solve` will be removed in a future PyTorch release.
``X = torch.lu_solve(B, LU, pivots)`` should be replaced with
.. code:: python
X = linalg.lu_solve(LU, pivots, B)
Arguments:
b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
is zero or more batch dimensions.
LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`,
where :math:`*` is zero or more batch dimensions.
LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`,
where :math:`*` is zero or more batch dimensions.
The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
:attr:`LU_data`.
Keyword args:
{out}
Example::
>>> A = torch.randn(2, 3, 3)
>>> b = torch.randn(2, 3, 1)
>>> LU, pivots = torch.linalg.lu_factor(A)
>>> x = torch.lu_solve(b, LU, pivots)
>>> torch.dist(A @ x, b)
tensor(1.00000e-07 *
2.8312)
""".format(
**common_args
),
)
add_docstr(
torch.masked_select,
r"""
masked_select(input, mask, *, out=None) -> Tensor
Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
the boolean mask :attr:`mask` which is a `BoolTensor`.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.
.. note:: The returned tensor does **not** use the same storage
as the original tensor
Args:
{input}
mask (BoolTensor): the tensor containing the binary mask to index with
Keyword args:
{out}
Example::
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.3552, -2.3825, -0.8297, 0.3477],
[-1.2035, 1.2252, 0.5002, 0.6248],
[ 0.1307, -2.0608, 0.1244, 2.0139]])
>>> mask = x.ge(0.5)
>>> mask
tensor([[False, False, False, False],
[False, True, True, True],
[False, False, False, True]])
>>> torch.masked_select(x, mask)
tensor([ 1.2252, 0.5002, 0.6248, 2.0139])
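>>> # A small deterministic example:
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.masked_select(y, y > 2)
tensor([3, 4])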
""".format(
**common_args
),
)
add_docstr(
torch.matrix_rank,
r"""
matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor
Returns the numerical rank of a 2-D tensor. The method to compute the
matrix rank is done using SVD by default. If :attr:`symmetric` is ``True``,
then :attr:`input` is assumed to be symmetric, and the computation of the
rank is done by obtaining the eigenvalues.
:attr:`tol` is the threshold below which the singular values (or the eigenvalues
when :attr:`symmetric` is ``True``) are considered to be 0. If :attr:`tol` is not
specified, :attr:`tol` is set to ``S.max() * max(S.size()) * eps`` where `S` is the
singular values (or the eigenvalues when :attr:`symmetric` is ``True``), and ``eps``
is the epsilon value for the datatype of :attr:`input`.
.. warning::
:func:`torch.matrix_rank` is deprecated in favor of :func:`torch.linalg.matrix_rank`
and will be removed in a future PyTorch release. The parameter :attr:`symmetric` was
renamed in :func:`torch.linalg.matrix_rank` to :attr:`hermitian`.
Args:
input (Tensor): the input 2-D tensor
tol (float, optional): the tolerance value. Default: ``None``
symmetric(bool, optional): indicates whether :attr:`input` is symmetric.
Default: ``False``
Keyword args:
{out}
Example::
>>> a = torch.eye(10)
>>> torch.matrix_rank(a)
tensor(10)
>>> b = torch.eye(10)
>>> b[0, 0] = 0
>>> torch.matrix_rank(b)
tensor(9)
""".format(
**common_args
),
)
add_docstr(
torch.matrix_power,
r"""
matrix_power(input, n, *, out=None) -> Tensor
Alias for :func:`torch.linalg.matrix_power`
""",
)
add_docstr(
torch.matrix_exp,
r"""
matrix_exp(A) -> Tensor
Alias for :func:`torch.linalg.matrix_exp`.
""",
)
add_docstr(
torch.max,
r"""
max(input) -> Tensor
Returns the maximum value of all elements in the ``input`` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``max(dim=0)``
Args:
{input}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6763, 0.7445, -2.2369]])
>>> torch.max(a)
tensor(0.7445)
.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`. And ``indices`` is the index location of each maximum value found
(argmax).
If ``keepdim`` is ``True``, the output tensors are of the same size
as ``input`` except in the dimension ``dim`` where they are of size 1.
Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than ``input``.
.. note:: If there are multiple maximal values in a reduced row then
the indices of the first maximal value are returned.
Args:
{input}
{dim}
{keepdim} Default: ``False``.
Keyword args:
out (tuple, optional): the result tuple of two output tensors (max, max_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
[ 1.1949, -1.1127, -2.2379, -0.6702],
[ 1.5717, -0.9207, 0.1297, -1.8768],
[-0.6172, 1.0036, -0.6060, -0.2432]])
>>> torch.max(a, 1)
torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
.. function:: max(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.maximum`.
""".format(
**single_dim_common
),
)
add_docstr(
torch.maximum,
r"""
maximum(input, other, *, out=None) -> Tensor
Computes the element-wise maximum of :attr:`input` and :attr:`other`.
.. note::
If one of the elements being compared is a NaN, then that element is returned.
:func:`maximum` is not supported for tensors with complex dtypes.
Args:
{input}
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> a = torch.tensor((1, 2, -1))
>>> b = torch.tensor((3, 0, 4))
>>> torch.maximum(a, b)
tensor([3, 2, 4])
""".format(
**common_args
),
)
add_docstr(
torch.fmax,
r"""
fmax(input, other, *, out=None) -> Tensor
Computes the element-wise maximum of :attr:`input` and :attr:`other`.
This is like :func:`torch.maximum` except it handles NaNs differently:
if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum.
Only if both elements are NaN is NaN propagated.
This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
Args:
{input}
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')])
>>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')])
>>> torch.fmax(a, b)
tensor([9.7000, 0.5000, 3.1000, nan])
""".format(
**common_args
),
)
add_docstr(
torch.amax,
r"""
amax(input, dim, keepdim=False, *, out=None) -> Tensor
Returns the maximum value of each slice of the :attr:`input` tensor in the given
dimension(s) :attr:`dim`.
.. note::
The difference between ``max``/``min`` and ``amax``/``amin`` is:
- ``amax``/``amin`` supports reducing on multiple dimensions,
- ``amax``/``amin`` does not return indices,
- ``amax``/``amin`` evenly distributes gradient between equal values,
while ``max(dim)``/``min(dim)`` propagates gradient only to a single
index in the source tensor.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{out}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.8177, 1.4878, -0.2491, 0.9130],
[-0.7158, 1.1775, 2.0992, 0.4817],
[-0.0053, 0.0164, -1.3738, -0.0507],
[ 1.9700, 1.1106, -1.0318, -1.0816]])
>>> torch.amax(a, 1)
tensor([1.4878, 2.0992, 0.0164, 1.9700])
""".format(
**multi_dim_common
),
)
add_docstr(
torch.argmax,
r"""
argmax(input) -> LongTensor
Returns the indices of the maximum value of all elements in the :attr:`input` tensor.
This is the second value returned by :meth:`torch.max`. See its
documentation for the exact semantics of this method.
.. note:: If there are multiple maximal values then the indices of the first maximal value are returned.
Args:
{input}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
[-0.7401, -0.8805, -0.3402, -1.1936],
[ 0.4907, -1.3948, -1.0691, -0.3132],
[-1.6092, 0.5419, -0.2993, 0.3195]])
>>> torch.argmax(a)
tensor(0)
.. function:: argmax(input, dim, keepdim=False) -> LongTensor
:noindex:
Returns the indices of the maximum values of a tensor across a dimension.
This is the second value returned by :meth:`torch.max`. See its
documentation for the exact semantics of this method.
Args:
{input}
{dim} If ``None``, the argmax of the flattened input is returned.
{keepdim} Ignored if ``dim=None``.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
[-0.7401, -0.8805, -0.3402, -1.1936],
[ 0.4907, -1.3948, -1.0691, -0.3132],
[-1.6092, 0.5419, -0.2993, 0.3195]])
>>> torch.argmax(a, dim=1)
tensor([ 0, 2, 0, 1])
""".format(
**single_dim_common
),
)
add_docstr(
torch.argwhere,
r"""
argwhere(input) -> Tensor
Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).
If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
.. note::
This function is similar to NumPy's `argwhere`.
When :attr:`input` is on CUDA, this function causes host-device synchronization.
Args:
{input}
Example::
>>> t = torch.tensor([1, 0, 1])
>>> torch.argwhere(t)
tensor([[0],
[2]])
>>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
>>> torch.argwhere(t)
tensor([[0, 0],
[0, 2],
[1, 1],
[1, 2]])
""",
)
add_docstr(
torch.mean,
r"""
mean(input, *, dtype=None) -> Tensor
Returns the mean value of all elements in the :attr:`input` tensor.
Args:
{input}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.2294, -0.5481, 1.3288]])
>>> torch.mean(a)
tensor(0.3367)
.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
:noindex:
Returns the mean value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{dtype}
{out}
.. seealso::
:func:`torch.nanmean` computes the mean value of `non-NaN` elements.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
[-0.9644, 1.0131, -0.6549, -1.4279],
[-0.2951, -1.3350, -0.7694, 0.5600],
[ 1.0842, -0.9580, 0.3623, 0.2343]])
>>> torch.mean(a, 1)
tensor([-0.0163, -0.5085, -0.4599, 0.1807])
>>> torch.mean(a, 1, True)
tensor([[-0.0163],
[-0.5085],
[-0.4599],
[ 0.1807]])
""".format(
**multi_dim_common
),
)
add_docstr(
torch.nanmean,
r"""
nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
Computes the mean of all `non-NaN` elements along the specified dimensions.
This function is identical to :func:`torch.mean` when there are no `NaN` values
in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
`NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).
{keepdim_details}
Args:
{input}
{opt_dim}
{keepdim}
Keyword args:
{dtype}
{out}
.. seealso::
:func:`torch.mean` computes the mean value, propagating `NaN`.
Example::
>>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
>>> x.mean()
tensor(nan)
>>> x.nanmean()
tensor(1.8000)
>>> x.mean(dim=0)
tensor([ nan, 1.5000, 2.5000])
>>> x.nanmean(dim=0)
tensor([1.0000, 1.5000, 2.5000])
# If all elements in the reduced dimensions are NaN then the result is NaN
>>> torch.tensor([torch.nan]).nanmean()
tensor(nan)
""".format(
**multi_dim_common
),
)
add_docstr(
torch.median,
r"""
median(input) -> Tensor
Returns the median of the values in :attr:`input`.
.. note::
The median is not unique for :attr:`input` tensors with an even number
of elements. In this case the lower of the two medians is returned. To
compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
.. warning::
This function produces deterministic (sub)gradients unlike ``median(dim=0)``
Args:
{input}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 1.5219, -1.5212, 0.2202]])
>>> torch.median(a)
tensor(0.2202)
.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.
.. note::
The median is not unique for :attr:`input` tensors with an even number
of elements in the dimension :attr:`dim`. In this case the lower of the
two medians is returned. To compute the mean of both medians in
:attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
.. warning::
``indices`` does not necessarily contain the first occurrence of each
median value found, unless it is unique.
The exact implementation details are device-specific.
Do not expect the same result when run on CPU and GPU in general.
For the same reason do not expect the gradients to be deterministic.
Args:
{input}
{dim}
{keepdim}
Keyword args:
out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
tensor, which must have dtype long, with their indices in the dimension
:attr:`dim` of :attr:`input`.
Example::
>>> a = torch.randn(4, 5)
>>> a
tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
[ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
[-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
[ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
>>> torch.median(a, 1)
torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
""".format(
**single_dim_common
),
)
add_docstr(
torch.nanmedian,
r"""
nanmedian(input) -> Tensor
Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
while this function will return the median of the non-``NaN`` elements in :attr:`input`.
If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
Args:
{input}
Example::
>>> a = torch.tensor([1, float('nan'), 3, 2])
>>> a.median()
tensor(nan)
>>> a.nanmedian()
tensor(2.)
.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
found in the dimension :attr:`dim`.
This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
Args:
{input}
{dim}
{keepdim}
Keyword args:
out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
tensor, which must have dtype long, with their indices in the dimension
:attr:`dim` of :attr:`input`.
Example::
>>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
>>> a
tensor([[2., 3., 1.],
[nan, 1., nan]])
>>> a.median(0)
torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
>>> a.nanmedian(0)
torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
""".format(
**single_dim_common
),
)
add_docstr(
torch.quantile,
r"""
quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.
To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location
of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
indices ``i`` and ``j`` in the sorted order, the result is computed according to the given
:attr:`interpolation` method as follows:
- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
- ``lower``: ``a``.
- ``higher``: ``b``.
- ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions).
- ``midpoint``: ``(a + b) / 2``.
If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction.
.. note::
By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation.
Args:
{input}
q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
{dim}
{keepdim}
Keyword arguments:
interpolation (str): interpolation method to use when the desired quantile lies between two data points.
Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
Default is ``linear``.
{out}
Example::
>>> a = torch.randn(2, 3)
>>> a
tensor([[ 0.0795, -1.2117, 0.9765],
[ 1.1707, 0.6706, 0.4884]])
>>> q = torch.tensor([0.25, 0.5, 0.75])
>>> torch.quantile(a, q, dim=1, keepdim=True)
tensor([[[-0.5661],
[ 0.5795]],
[[ 0.0795],
[ 0.6706]],
[[ 0.5280],
[ 0.9206]]])
>>> torch.quantile(a, q, dim=1, keepdim=True).shape
torch.Size([3, 2, 1])
>>> a = torch.arange(4.)
>>> a
tensor([0., 1., 2., 3.])
>>> torch.quantile(a, 0.6, interpolation='linear')
tensor(1.8000)
>>> torch.quantile(a, 0.6, interpolation='lower')
tensor(1.)
>>> torch.quantile(a, 0.6, interpolation='higher')
tensor(2.)
>>> torch.quantile(a, 0.6, interpolation='midpoint')
tensor(1.5000)
>>> torch.quantile(a, 0.6, interpolation='nearest')
tensor(2.)
>>> torch.quantile(a, 0.4, interpolation='nearest')
tensor(1.)
""".format(
**single_dim_common
),
)
add_docstr(
torch.nanquantile,
r"""
nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
not exist. If all values in a reduced row are ``NaN`` then the quantiles for
that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.
Args:
{input}
q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
{dim}
{keepdim}
Keyword arguments:
interpolation (str): interpolation method to use when the desired quantile lies between two data points.
Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
Default is ``linear``.
{out}
Example::
>>> t = torch.tensor([float('nan'), 1, 2])
>>> t.quantile(0.5)
tensor(nan)
>>> t.nanquantile(0.5)
tensor(1.5000)
>>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
>>> t
tensor([[nan, nan],
[1., 2.]])
>>> t.nanquantile(0.5, dim=0)
tensor([1., 2.])
>>> t.nanquantile(0.5, dim=1)
tensor([ nan, 1.5000])
""".format(
**single_dim_common
),
)
add_docstr(
torch.min,
r"""
min(input) -> Tensor
Returns the minimum value of all elements in the :attr:`input` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``min(dim=0)``
Args:
{input}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6750, 1.0857, 1.7197]])
>>> torch.min(a)
tensor(0.6750)
.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
:noindex:
Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`. And ``indices`` is the index location of each minimum value found
(argmin).
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: If there are multiple minimal values in a reduced row then
the indices of the first minimal value are returned.
Args:
{input}
{dim}
{keepdim}
Keyword args:
out (tuple, optional): the tuple of two output tensors (min, min_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
[-1.4644, -0.2635, -0.3651, 0.6134],
[ 0.2457, 0.0384, 1.0128, 0.7015],
[-0.1153, 2.9849, 2.1458, 0.5788]])
>>> torch.min(a, 1)
torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
.. function:: min(input, other, *, out=None) -> Tensor
:noindex:
See :func:`torch.minimum`.
""".format(
**single_dim_common
),
)
add_docstr(
torch.minimum,
r"""
minimum(input, other, *, out=None) -> Tensor
Computes the element-wise minimum of :attr:`input` and :attr:`other`.
.. note::
If one of the elements being compared is a NaN, then that element is returned.
:func:`minimum` is not supported for tensors with complex dtypes.
Args:
{input}
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> a = torch.tensor((1, 2, -1))
>>> b = torch.tensor((3, 0, 4))
>>> torch.minimum(a, b)
tensor([1, 0, -1])
""".format(
**common_args
),
)
add_docstr(
torch.fmin,
r"""
fmin(input, other, *, out=None) -> Tensor
Computes the element-wise minimum of :attr:`input` and :attr:`other`.
This is like :func:`torch.minimum` except it handles NaNs differently:
if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum.
Only if both elements are NaN is NaN propagated.
This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
Args:
{input}
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')])
>>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')])
>>> torch.fmin(a, b)
tensor([-9.3000, 0.1000, 2.1000, nan])
""".format(
**common_args
),
)
add_docstr(
torch.amin,
r"""
amin(input, dim, keepdim=False, *, out=None) -> Tensor
Returns the minimum value of each slice of the :attr:`input` tensor in the given
dimension(s) :attr:`dim`.
.. note::
The difference between ``max``/``min`` and ``amax``/``amin`` is:
- ``amax``/``amin`` supports reducing on multiple dimensions,
- ``amax``/``amin`` does not return indices,
- ``amax``/``amin`` evenly distributes gradient between equal values,
while ``max(dim)``/``min(dim)`` propagates gradient only to a single
index in the source tensor.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{out}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.6451, -0.4866, 0.2987, -1.3312],
[-0.5744, 1.2980, 1.8397, -0.2713],
[ 0.9128, 0.9214, -1.7268, -0.2995],
[ 0.9023, 0.4853, 0.9075, -1.6165]])
>>> torch.amin(a, 1)
tensor([-1.3312, -0.5744, -1.7268, -1.6165])
""".format(
**multi_dim_common
),
)
add_docstr(
torch.aminmax,
r"""
aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max)
Computes the minimum and maximum values of the :attr:`input` tensor.
Args:
input (Tensor):
The input tensor
Keyword Args:
dim (Optional[int]):
The dimension along which to compute the values. If `None`,
computes the values over the entire :attr:`input` tensor.
Default is `None`.
keepdim (bool):
If `True`, the reduced dimensions will be kept in the output
tensor as dimensions with size 1 for broadcasting, otherwise
they will be removed, as if calling :func:`torch.squeeze`.
Default is `False`.
out (Optional[Tuple[Tensor, Tensor]]):
Optional tensors on which to write the result. Must have the same
shape and dtype as the expected output.
Default is `None`.
Returns:
A named tuple `(min, max)` containing the minimum and maximum values.
Raises:
RuntimeError
If any of the dimensions to compute the values over has size 0.
.. note::
NaN values are propagated to the output if at least one value is NaN.
.. seealso::
:func:`torch.amin` computes just the minimum value
:func:`torch.amax` computes just the maximum value
Example::
>>> torch.aminmax(torch.tensor([1, -3, 5]))
torch.return_types.aminmax(
min=tensor(-3),
max=tensor(5))
>>> # aminmax propagates NaNs
>>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan]))
torch.return_types.aminmax(
min=tensor(nan),
max=tensor(nan))
>>> t = torch.arange(10).view(2, 5)
>>> t
tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> t.aminmax(dim=0, keepdim=True)
torch.return_types.aminmax(
min=tensor([[0, 1, 2, 3, 4]]),
max=tensor([[5, 6, 7, 8, 9]]))
""",
)
add_docstr(
torch.argmin,
r"""
argmin(input, dim=None, keepdim=False) -> LongTensor
Returns the indices of the minimum value(s) of the flattened tensor or along a dimension.
This is the second value returned by :meth:`torch.min`. See its
documentation for the exact semantics of this method.
.. note:: If there are multiple minimal values then the indices of the first minimal value are returned.
Args:
{input}
{dim} If ``None``, the argmin of the flattened input is returned.
{keepdim}.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.1139, 0.2254, -0.1381, 0.3687],
[ 1.0100, -1.1975, -0.0102, -0.4732],
[-0.9240, 0.1207, -0.7506, -1.0213],
[ 1.7809, -1.2960, 0.9384, 0.1438]])
>>> torch.argmin(a)
tensor(13)
>>> torch.argmin(a, dim=1)
tensor([ 2, 1, 3, 1])
>>> torch.argmin(a, dim=1, keepdim=True)
tensor([[2],
[1],
[3],
[1]])
""".format(
**single_dim_common
),
)
add_docstr(
torch.mm,
r"""
mm(input, mat2, *, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.
If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
For broadcasting matrix products, see :func:`torch.matmul`.
Supports strided and sparse 2-D tensors as inputs, autograd with
respect to strided inputs.
{tf32_note}
{rocm_fp16_note}
Args:
input (Tensor): the first matrix to be matrix multiplied
mat2 (Tensor): the second matrix to be matrix multiplied
Keyword args:
{out}
Example::
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.mm(mat1, mat2)
tensor([[ 0.4851, 0.5037, -0.3633],
[-0.0760, -3.6705, 2.4784]])
""".format(
**common_args, **tf32_notes, **rocm_fp16_notes
),
)
add_docstr(
torch.hspmm,
r"""
hspmm(mat1, mat2, *, out=None) -> Tensor
Performs a matrix multiplication of a :ref:`sparse COO matrix
<sparse-coo-docs>` :attr:`mat1` and a strided matrix :attr:`mat2`. The
result is a (1 + 1)-dimensional :ref:`hybrid COO matrix
<sparse-hybrid-coo-docs>`.
Args:
mat1 (Tensor): the first sparse matrix to be matrix multiplied
mat2 (Tensor): the second strided matrix to be matrix multiplied
Keyword args:
{out}
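A minimal illustrative example (the inputs below are random, so only the shape
of the resulting hybrid sparse tensor is meaningful):
Example::
    >>> mat1 = torch.randn(2, 3).to_sparse()
    >>> mat2 = torch.randn(3, 4)
    >>> torch.hspmm(mat1, mat2).shape
    torch.Size([2, 4])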
""".format(
**common_args
),
)
add_docstr(
torch.matmul,
r"""
matmul(input, other, *, out=None) -> Tensor
Matrix product of two tensors.
The behavior depends on the dimensionality of the tensors as follows:
- If both tensors are 1-dimensional, the dot product (scalar) is returned.
- If both arguments are 2-dimensional, the matrix-matrix product is returned.
- If the first argument is 1-dimensional and the second argument is 2-dimensional,
a 1 is prepended to its dimension for the purpose of the matrix multiply.
After the matrix multiply, the prepended dimension is removed.
- If the first argument is 2-dimensional and the second argument is 1-dimensional,
the matrix-vector product is returned.
- If both arguments are at least 1-dimensional and at least one argument is
N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
batched matrix multiply and removed after. If the second argument is 1-dimensional, a
1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
must be broadcastable). For example, if :attr:`input` is a
:math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)`
tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor.
Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs
are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a
:math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.
{tf32_note}
{rocm_fp16_note}
.. note::
The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.
Arguments:
input (Tensor): the first tensor to be multiplied
other (Tensor): the second tensor to be multiplied
Keyword args:
{out}
Example::
>>> # vector x vector
>>> tensor1 = torch.randn(3)
>>> tensor2 = torch.randn(3)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([])
>>> # matrix x vector
>>> tensor1 = torch.randn(3, 4)
>>> tensor2 = torch.randn(4)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([3])
>>> # batched matrix x broadcasted vector
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(4)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3])
>>> # batched matrix x batched matrix
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(10, 4, 5)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3, 5])
>>> # batched matrix x broadcasted matrix
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(4, 5)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3, 5])
""".format(
**common_args, **tf32_notes, **rocm_fp16_notes
),
)
add_docstr(
torch.mode,
r"""
mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, i.e. a value which appears most often
in that row, and ``indices`` is the index location of each mode value found.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
Args:
{input}
{dim}
{keepdim}
Keyword args:
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randint(10, (5,))
>>> a
tensor([6, 5, 1, 0, 2])
>>> b = a + (torch.randn(50, 1) * 5).long()
>>> torch.mode(b, 0)
torch.return_types.mode(values=tensor([6, 5, 1, 0, 2]), indices=tensor([2, 2, 2, 2, 2]))
""".format(
**single_dim_common
),
)
add_docstr(
torch.mul,
r"""
mul(input, other, *, out=None) -> Tensor
Multiplies :attr:`input` by :attr:`other`.
.. math::
\text{out}_i = \text{input}_i \times \text{other}_i
"""
+ r"""
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
{input}
other (Tensor or Number): the tensor or number to multiply input by.
Keyword args:
{out}
Examples::
>>> a = torch.randn(3)
>>> a
tensor([ 0.2015, -0.4255, 2.6087])
>>> torch.mul(a, 100)
tensor([ 20.1494, -42.5491, 260.8663])
>>> b = torch.randn(4, 1)
>>> b
tensor([[ 1.1207],
[-0.3137],
[ 0.0700],
[ 0.8378]])
>>> c = torch.randn(1, 4)
>>> c
tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]])
>>> torch.mul(b, c)
tensor([[ 0.5767, 0.1363, -0.5877, 2.5083],
[-0.1614, -0.0382, 0.1645, -0.7021],
[ 0.0360, 0.0085, -0.0367, 0.1567],
[ 0.4312, 0.1019, -0.4394, 1.8753]])
""".format(
**common_args
),
)
add_docstr(
torch.multiply,
r"""
multiply(input, other, *, out=None)
Alias for :func:`torch.mul`.
""",
)
add_docstr(
torch.multinomial,
r"""
multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor
Returns a tensor where each row contains :attr:`num_samples` indices sampled
from the multinomial probability distribution located in the corresponding row
of tensor :attr:`input`.
.. note::
The rows of :attr:`input` do not need to sum to one (in which case we use
the values as weights), but must be non-negative, finite and have
a non-zero sum.
Indices are ordered from left to right according to when each was sampled
(first samples are placed in first column).
If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.
If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
:math:`(m \times \text{{num\_samples}})`.
If replacement is ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
.. note::
When drawn without replacement, :attr:`num_samples` must be lower than the
number of non-zero elements in :attr:`input` (or the minimum number of non-zero
elements in each row of :attr:`input` if it is a matrix).
Args:
input (Tensor): the input tensor containing probabilities
num_samples (int): number of samples to draw
replacement (bool, optional): whether to draw with replacement or not
Keyword args:
{generator}
{out}
Example::
>>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights
>>> torch.multinomial(weights, 2)
tensor([1, 2])
>>> torch.multinomial(weights, 4) # ERROR!
RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False,
not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320
>>> torch.multinomial(weights, 4, replacement=True)
tensor([ 2, 1, 1, 1])
""".format(
**common_args
),
)
add_docstr(
torch.mv,
r"""
mv(input, vec, *, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`input` and the vector
:attr:`vec`.
If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size :math:`m`, :attr:`out` will be 1-D of size :math:`n`.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Args:
input (Tensor): matrix to be multiplied
vec (Tensor): vector to be multiplied
Keyword args:
{out}
Example::
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.mv(mat, vec)
tensor([ 1.0404, -0.6361])
""".format(
**common_args
),
)
add_docstr(
torch.mvlgamma,
r"""
mvlgamma(input, p, *, out=None) -> Tensor
Alias for :func:`torch.special.multigammaln`.
""",
)
add_docstr(
torch.movedim,
r"""
movedim(input, source, destination) -> Tensor
Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
to the position(s) in :attr:`destination`.
Other dimensions of :attr:`input` that are not explicitly moved remain in
their original order and appear at the positions not specified in :attr:`destination`.
Args:
{input}
source (int or tuple of ints): Original positions of the dims to move. These must be unique.
destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.
Examples::
>>> t = torch.randn(3,2,1)
>>> t
tensor([[[-0.3362],
[-0.8437]],
[[-0.9627],
[ 0.1727]],
[[ 0.5173],
[-0.1398]]])
>>> torch.movedim(t, 1, 0).shape
torch.Size([2, 3, 1])
>>> torch.movedim(t, 1, 0)
tensor([[[-0.3362],
[-0.9627],
[ 0.5173]],
[[-0.8437],
[ 0.1727],
[-0.1398]]])
>>> torch.movedim(t, (1, 2), (0, 1)).shape
torch.Size([2, 1, 3])
>>> torch.movedim(t, (1, 2), (0, 1))
tensor([[[-0.3362, -0.9627, 0.5173]],
[[-0.8437, 0.1727, -0.1398]]])
""".format(
**common_args
),
)
add_docstr(
torch.moveaxis,
r"""
moveaxis(input, source, destination) -> Tensor
Alias for :func:`torch.movedim`.
This function is equivalent to NumPy's moveaxis function.
Examples::
>>> t = torch.randn(3,2,1)
>>> t
tensor([[[-0.3362],
[-0.8437]],
[[-0.9627],
[ 0.1727]],
[[ 0.5173],
[-0.1398]]])
>>> torch.moveaxis(t, 1, 0).shape
torch.Size([2, 3, 1])
>>> torch.moveaxis(t, 1, 0)
tensor([[[-0.3362],
[-0.9627],
[ 0.5173]],
[[-0.8437],
[ 0.1727],
[-0.1398]]])
>>> torch.moveaxis(t, (1, 2), (0, 1)).shape
torch.Size([2, 1, 3])
>>> torch.moveaxis(t, (1, 2), (0, 1))
tensor([[[-0.3362, -0.9627, 0.5173]],
[[-0.8437, 0.1727, -0.1398]]])
""".format(
**common_args
),
)
add_docstr(
torch.swapdims,
r"""
swapdims(input, dim0, dim1) -> Tensor
Alias for :func:`torch.transpose`.
This function is equivalent to NumPy's swapaxes function.
Examples::
>>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> torch.swapdims(x, 0, 1)
tensor([[[0, 1],
[4, 5]],
[[2, 3],
[6, 7]]])
>>> torch.swapdims(x, 0, 2)
tensor([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
""".format(
**common_args
),
)
add_docstr(
torch.swapaxes,
r"""
swapaxes(input, axis0, axis1) -> Tensor
Alias for :func:`torch.transpose`.
This function is equivalent to NumPy's swapaxes function.
Examples::
>>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> torch.swapaxes(x, 0, 1)
tensor([[[0, 1],
[4, 5]],
[[2, 3],
[6, 7]]])
>>> torch.swapaxes(x, 0, 2)
tensor([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
""".format(
**common_args
),
)
add_docstr(
torch.narrow,
r"""
narrow(input, dim, start, length) -> Tensor
Returns a new tensor that is a narrowed version of :attr:`input` tensor. The
dimension :attr:`dim` spans only the indices from :attr:`start` to ``start + length`` (exclusive). The
returned tensor and :attr:`input` tensor share the same underlying storage.
Args:
input (Tensor): the tensor to narrow
dim (int): the dimension along which to narrow
start (int): the index along :attr:`dim` at which the narrowed slice starts
length (int): the number of elements to keep along :attr:`dim`, starting from :attr:`start`
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> torch.narrow(x, 0, 0, 2)
tensor([[ 1, 2, 3],
[ 4, 5, 6]])
>>> torch.narrow(x, 1, 1, 2)
tensor([[ 2, 3],
[ 5, 6],
[ 8, 9]])
""",
)
add_docstr(
torch.nan_to_num,
r"""
nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor
Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input`
with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively.
By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the
greatest finite value representable by :attr:`input`'s dtype, and negative infinity
is replaced with the least finite value representable by :attr:`input`'s dtype.
Args:
{input}
nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero.
posinf (Number, optional): if a Number, the value to replace positive infinity values with.
If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype.
Default is None.
neginf (Number, optional): if a Number, the value to replace negative infinity values with.
If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype.
Default is None.
Keyword args:
{out}
Example::
>>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
>>> torch.nan_to_num(x)
tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
>>> torch.nan_to_num(x, nan=2.0)
tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00])
>>> torch.nan_to_num(x, nan=2.0, posinf=1.0)
tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00])
""".format(
**common_args
),
)
add_docstr(
torch.ne,
r"""
ne(input, other, *, out=None) -> Tensor
Computes :math:`\text{input} \neq \text{other}` element-wise.
"""
+ r"""
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
Keyword args:
{out}
Returns:
A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere
Example::
>>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [True, False]])
""".format(
**common_args
),
)
add_docstr(
torch.not_equal,
r"""
not_equal(input, other, *, out=None) -> Tensor
Alias for :func:`torch.ne`.
""",
)
add_docstr(
torch.neg,
r"""
neg(input, *, out=None) -> Tensor
Returns a new tensor with the negative of the elements of :attr:`input`.
.. math::
\text{out} = -1 \times \text{input}
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
>>> torch.neg(a)
tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940])
""".format(
**common_args
),
)
add_docstr(
torch.negative,
r"""
negative(input, *, out=None) -> Tensor
Alias for :func:`torch.neg`
""",
)
add_docstr(
torch.nextafter,
r"""
nextafter(input, other, *, out=None) -> Tensor
Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise.
The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
Keyword args:
{out}
Example::
>>> eps = torch.finfo(torch.float32).eps
>>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
tensor([True, True])
""".format(
**common_args
),
)
add_docstr(
torch.nonzero,
r"""
nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors
.. note::
:func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
2-D tensor where each row is the index for a nonzero value.
:func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
contains nonzero indices for a certain dimension.
See below for more details on the two behaviors.
When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
host-device synchronization.
**When** :attr:`as_tuple` **is** ``False`` **(default)**:
Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).
If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
**When** :attr:`as_tuple` **is** ``True``:
Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
each containing the indices (in that dimension) of all non-zero elements of
:attr:`input`.
If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
tensors of size :math:`z`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
value, it is treated as a one-dimensional tensor with one element.
Args:
{input}
Keyword args:
out (LongTensor, optional): the output tensor containing indices
Returns:
LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
each dimension, containing the indices of each nonzero element along that
dimension.
Example::
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
tensor([[ 0],
[ 1],
[ 2],
[ 4]])
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]))
tensor([[ 0, 0],
[ 1, 1],
[ 2, 2],
[ 3, 3]])
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
(tensor([0, 1, 2, 4]),)
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
(tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
>>> torch.nonzero(torch.tensor(5), as_tuple=True)
(tensor([0]),)
""".format(
**common_args
),
)
add_docstr(
torch.normal,
r"""
normal(mean, std, *, generator=None, out=None) -> Tensor
Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.
The :attr:`mean` is a tensor with the mean of
each output element's normal distribution.
The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution.
The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
total number of elements in each tensor needs to be the same.
.. note:: When the shapes do not match, the shape of :attr:`mean`
is used as the shape for the returned output tensor
.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
its device with the CPU.
Args:
mean (Tensor): the tensor of per-element means
std (Tensor): the tensor of per-element standard deviations
Keyword args:
{generator}
{out}
Example::
>>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
8.0505, 8.1408, 9.0563, 10.0566])
.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means are shared among all drawn
elements.
Args:
mean (float, optional): the mean for all distributions
std (Tensor): the tensor of per-element standard deviations
Keyword args:
{out}
Example::
>>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the standard deviations are shared among
all drawn elements.
Args:
mean (Tensor): the tensor of per-element means
std (float, optional): the standard deviation for all distributions
Keyword args:
out (Tensor, optional): the output tensor
Example::
>>> torch.normal(mean=torch.arange(1., 6.))
tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
.. function:: normal(mean, std, size, *, out=None) -> Tensor
:noindex:
Similar to the function above, but the means and standard deviations are shared
among all drawn elements. The resulting tensor has size given by :attr:`size`.
Args:
mean (float): the mean for all distributions
std (float): the standard deviation for all distributions
size (int...): a sequence of integers defining the shape of the output tensor.
Keyword args:
{out}
Example::
>>> torch.normal(2, 3, size=(1, 4))
tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
""".format(
**common_args
),
)
add_docstr(
torch.numel,
r"""
numel(input) -> int
Returns the total number of elements in the :attr:`input` tensor.
Args:
{input}
Example::
>>> a = torch.randn(1, 2, 3, 4, 5)
>>> torch.numel(a)
120
>>> a = torch.zeros(4,4)
>>> torch.numel(a)
16
""".format(
**common_args
),
)
add_docstr(
torch.ones,
r"""
ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `1`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword arguments:
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.ones(2, 3)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> torch.ones(5)
tensor([ 1., 1., 1., 1., 1.])
""".format(
**factory_common_args
),
)
add_docstr(
torch.ones_like,
r"""
ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor filled with the scalar value `1`, with the same size as
:attr:`input`. ``torch.ones_like(input)`` is equivalent to
``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
.. warning::
As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
the old ``torch.ones_like(input, out=output)`` is equivalent to
``torch.ones(input.size(), out=output)``.
Args:
{input}
Keyword arguments:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
Example::
>>> input = torch.empty(2, 3)
>>> torch.ones_like(input)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
""".format(
**factory_like_common_args
),
)
add_docstr(
torch.orgqr,
r"""
orgqr(input, tau) -> Tensor
Alias for :func:`torch.linalg.householder_product`.
""",
)
add_docstr(
torch.ormqr,
r"""
ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor
Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.
Multiplies a :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`,
where `Q` is represented using Householder reflectors `(input, tau)`.
See `Representation of Orthogonal or Unitary Matrices`_ for further details.
If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`.
When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`.
It has size :math:`n \times n` otherwise.
If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions.
.. seealso::
:func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q`
from the QR decomposition.
Args:
input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions
and `mn` equals `m` or `n` depending on :attr:`left`.
tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions.
other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
left (bool): controls the order of multiplication.
transpose (bool): controls whether the matrix `Q` is conjugate transposed or not.
Keyword args:
out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`.
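A minimal illustrative sketch, assuming the Householder representation `(input, tau)`
is obtained from :func:`torch.geqrf` (the inputs are random, so only the output shape is shown):
Example::
    >>> A = torch.randn(3, 3)
    >>> h, tau = torch.geqrf(A)
    >>> C = torch.randn(3, 2)
    >>> torch.ormqr(h, tau, C).shape
    torch.Size([3, 2])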
.. _Representation of Orthogonal or Unitary Matrices:
https://www.netlib.org/lapack/lug/node128.html
""",
)
add_docstr(
torch.permute,
r"""
permute(input, dims) -> Tensor
Returns a view of the original tensor :attr:`input` with its dimensions permuted.
Args:
{input}
dims (tuple of int): The desired ordering of dimensions
Example::
>>> x = torch.randn(2, 3, 5)
>>> x.size()
torch.Size([2, 3, 5])
>>> torch.permute(x, (2, 0, 1)).size()
torch.Size([5, 2, 3])
""".format(
**common_args
),
)
add_docstr(
torch.poisson,
r"""
poisson(input, generator=None) -> Tensor
Returns a tensor of the same size as :attr:`input` with each element
sampled from a Poisson distribution with rate parameter given by the corresponding
element in :attr:`input` i.e.,
.. math::
\text{{out}}_i \sim \text{{Poisson}}(\text{{input}}_i)
Args:
input (Tensor): the input tensor containing the rates of the Poisson distribution
Keyword args:
{generator}
Example::
>>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5
>>> torch.poisson(rates)
tensor([[9., 1., 3., 5.],
[8., 6., 6., 0.],
[0., 4., 5., 3.],
[2., 1., 4., 2.]])
""".format(
**common_args
),
)
add_docstr(
torch.polygamma,
r"""
polygamma(n, input, *, out=None) -> Tensor
Alias for :func:`torch.special.polygamma`.
""",
)
add_docstr(
torch.positive,
r"""
positive(input) -> Tensor
Returns :attr:`input`.
Throws a runtime error if :attr:`input` is a bool tensor.
"""
+ r"""
Args:
{input}
Example::
>>> t = torch.randn(5)
>>> t
tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
>>> torch.positive(t)
tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
""".format(
**common_args
),
)
add_docstr(
torch.pow,
r"""
pow(input, exponent, *, out=None) -> Tensor
Takes the power of each element in :attr:`input` with :attr:`exponent` and
returns a tensor with the result.
:attr:`exponent` can be either a single ``float`` number or a `Tensor`
with the same number of elements as :attr:`input`.
When :attr:`exponent` is a scalar value, the operation applied is:
.. math::
\text{out}_i = x_i ^ \text{exponent}
When :attr:`exponent` is a tensor, the operation applied is:
.. math::
\text{out}_i = x_i ^ {\text{exponent}_i}
"""
+ r"""
When :attr:`exponent` is a tensor, the shapes of :attr:`input`
and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
{input}
exponent (float or tensor): the exponent value
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
>>> torch.pow(a, 2)
tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
>>> exp = torch.arange(1., 5.)
>>> a = torch.arange(1., 5.)
>>> a
tensor([ 1., 2., 3., 4.])
>>> exp
tensor([ 1., 2., 3., 4.])
>>> torch.pow(a, exp)
tensor([ 1., 4., 27., 256.])
.. function:: pow(self, exponent, *, out=None) -> Tensor
:noindex:
:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
The returned tensor :attr:`out` is of the same shape as :attr:`exponent`.
The operation applied is:
.. math::
\text{{out}}_i = \text{{self}} ^ {{\text{{exponent}}_i}}
Args:
self (float): the scalar base value for the power operation
exponent (Tensor): the exponent tensor
Keyword args:
{out}
Example::
>>> exp = torch.arange(1., 5.)
>>> base = 2
>>> torch.pow(base, exp)
tensor([ 2., 4., 8., 16.])
""".format(
**common_args
),
)
add_docstr(
torch.float_power,
r"""
float_power(input, exponent, *, out=None) -> Tensor
Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
If neither input is complex returns a ``torch.float64`` tensor,
and if one or more inputs is complex returns a ``torch.complex128`` tensor.
.. note::
This function always computes in double precision, unlike :func:`torch.pow`,
which implements more typical :ref:`type promotion <type-promotion-doc>`.
This is useful when the computation needs to be performed in a wider or more precise dtype,
or the results of the computation may contain fractional values not representable in the input dtypes,
like when an integer base is raised to a negative integer exponent.
Args:
input (Tensor or Number): the base value(s)
exponent (Tensor or Number): the exponent value(s)
Keyword args:
{out}
Example::
>>> a = torch.randint(10, (4,))
>>> a
tensor([6, 4, 7, 1])
>>> torch.float_power(a, 2)
tensor([36., 16., 49., 1.], dtype=torch.float64)
>>> a = torch.arange(1, 5)
>>> a
tensor([ 1, 2, 3, 4])
>>> exp = torch.tensor([2, -3, 4, -5])
>>> exp
tensor([ 2, -3, 4, -5])
>>> torch.float_power(a, exp)
tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
""".format(
**common_args
),
)
add_docstr(
torch.prod,
r"""
prod(input, *, dtype=None) -> Tensor
Returns the product of all elements in the :attr:`input` tensor.
Args:
{input}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.8020, 0.5428, -1.5854]])
>>> torch.prod(a)
tensor(0.6902)
.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the product of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(4, 2)
>>> a
tensor([[ 0.5261, -0.3837],
[ 1.1857, -0.2498],
[-1.1646, 0.0705],
[ 1.1131, -1.0629]])
>>> torch.prod(a, 1)
tensor([-0.2018, -0.2962, -0.0821, -1.1831])
""".format(
**single_dim_common
),
)
add_docstr(
torch.promote_types,
r"""
promote_types(type1, type2) -> dtype
Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
not smaller nor of lower kind than either `type1` or `type2`. See type promotion
:ref:`documentation <type-promotion-doc>` for more information on the type
promotion logic.
Args:
type1 (:class:`torch.dtype`)
type2 (:class:`torch.dtype`)
Example::
>>> torch.promote_types(torch.int32, torch.float32)
torch.float32
>>> torch.promote_types(torch.uint8, torch.long)
torch.long
""",
)
add_docstr(
torch.qr,
r"""
qr(input, some=True, *, out=None) -> (Tensor, Tensor)
Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
:math:`R` being an upper triangular matrix or batch of upper triangular matrices.
If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.
.. warning::
:func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
replaced with a string parameter :attr:`mode`.
``Q, R = torch.qr(A)`` should be replaced with
.. code:: python
Q, R = torch.linalg.qr(A)
``Q, R = torch.qr(A, some=False)`` should be replaced with
.. code:: python
Q, R = torch.linalg.qr(A, mode="complete")
.. warning::
If you plan to backpropagate through QR, note that the current backward implementation
is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
columns of :attr:`input` are linearly independent.
This behavior will probably change once QR supports pivoting.
.. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
and may produce different (valid) decompositions on different device types
or different platforms.
Args:
input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
batch dimensions consisting of matrices of dimension :math:`m \times n`.
some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
complete QR decomposition. If `k = min(m, n)` then:
* ``some=True``: returns `(Q, R)` with dimensions (m, k), (k, n) (default)
* ``some=False``: returns `(Q, R)` with dimensions (m, m), (m, n)
Keyword args:
out (tuple, optional): tuple of `Q` and `R` tensors.
The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.
Example::
>>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> q, r = torch.qr(a)
>>> q
tensor([[-0.8571, 0.3943, 0.3314],
[-0.4286, -0.9029, -0.0343],
[ 0.2857, -0.1714, 0.9429]])
>>> r
tensor([[ -14.0000, -21.0000, 14.0000],
[ 0.0000, -175.0000, 70.0000],
[ 0.0000, 0.0000, -35.0000]])
>>> torch.mm(q, r).round()
tensor([[ 12., -51., 4.],
[ 6., 167., -68.],
[ -4., 24., -41.]])
>>> torch.mm(q.t(), q).round()
tensor([[ 1., 0., 0.],
[ 0., 1., -0.],
[ 0., -0., 1.]])
>>> a = torch.randn(3, 4, 5)
>>> q, r = torch.qr(a, some=False)
>>> torch.allclose(torch.matmul(q, r), a)
True
>>> torch.allclose(torch.matmul(q.mT, q), torch.eye(5))
True
""",
)
add_docstr(
torch.rad2deg,
r"""
rad2deg(input, *, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input`
converted from angles in radians to degrees.
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
>>> torch.rad2deg(a)
tensor([[ 180.0233, -180.0233],
[ 359.9894, -359.9894],
[ 89.9544, -89.9544]])
""".format(
**common_args
),
)
add_docstr(
torch.deg2rad,
r"""
deg2rad(input, *, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input`
converted from angles in degrees to radians.
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
>>> torch.deg2rad(a)
tensor([[ 3.1416, -3.1416],
[ 6.2832, -6.2832],
[ 1.5708, -1.5708]])
""".format(
**common_args
),
)
add_docstr(
torch.heaviside,
r"""
heaviside(input, values, *, out=None) -> Tensor
Computes the Heaviside step function for each element in :attr:`input`.
The Heaviside step function is defined as:
.. math::
\text{heaviside}(input, values) = \begin{cases}
0, & \text{if input < 0}\\
values, & \text{if input == 0}\\
1, & \text{if input > 0}
\end{cases}
"""
+ r"""
Args:
{input}
values (Tensor): The values to use where :attr:`input` is zero.
Keyword arguments:
{out}
Example::
>>> input = torch.tensor([-1.5, 0, 2.0])
>>> values = torch.tensor([0.5])
>>> torch.heaviside(input, values)
tensor([0.0000, 0.5000, 1.0000])
>>> values = torch.tensor([1.2, -2.0, 3.5])
>>> torch.heaviside(input, values)
tensor([0., -2., 1.])
""".format(
**common_args
),
)
add_docstr(
torch.rand,
r"""
rand(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
{generator}
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
""".format(
**factory_common_args
),
)
add_docstr(
torch.rand_like,
r"""
rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a uniform distribution on the interval :math:`[0, 1)`.
``torch.rand_like(input)`` is equivalent to
``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(
**factory_like_common_args
),
)
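
# Editorial sketch (not part of the original module): as stated above,
# ``torch.rand_like`` only forwards the metadata of its argument to
# ``torch.rand``. The helper below is hypothetical and exists purely to
# illustrate that contract; it is never called here.
def _rand_like_equivalence_sketch():
    import torch
    base = torch.empty(2, 3, dtype=torch.float64)
    like = torch.rand_like(base)
    # Same shape, dtype and device as ``base``; the values differ per call.
    assert like.shape == base.shape
    assert like.dtype == base.dtype
    assert like.device == base.device
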
add_docstr(
torch.randint,
"""
randint(low=0, high, size, \\*, generator=None, out=None, \
dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).
The shape of the tensor is defined by the variable argument :attr:`size`.
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
size (tuple): a tuple defining the shape of the output tensor.
Keyword args:
{generator}
{out}
dtype (`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
this function returns a tensor with dtype ``torch.int64``.
{layout}
{device}
{requires_grad}
Example::
>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])
>>> torch.randint(10, (2, 2))
tensor([[0, 2],
[5, 5]])
>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
[6, 7]])
""".format(
**factory_common_args
),
)
add_docstr(
torch.randint_like,
"""
randint_like(input, low=0, high, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same shape as Tensor :attr:`input` filled with
random integers generated uniformly between :attr:`low` (inclusive) and
:attr:`high` (exclusive).
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
{input}
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(
**factory_like_common_args
),
)
add_docstr(
torch.randn,
r"""
randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1)
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
{generator}
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
""".format(
**factory_common_args
),
)
add_docstr(
torch.randn_like,
r"""
randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a normal distribution with mean 0 and variance 1.
``torch.randn_like(input)`` is equivalent to
``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(
**factory_like_common_args
),
)
add_docstr(
torch.randperm,
"""
randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, \
device=None, requires_grad=False, pin_memory=False) -> Tensor
"""
+ r"""
Returns a random permutation of integers from ``0`` to ``n - 1``.
Args:
n (int): the upper bound (exclusive)
Keyword args:
{generator}
{out}
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: ``torch.int64``.
{layout}
{device}
{requires_grad}
{pin_memory}
Example::
>>> torch.randperm(4)
tensor([2, 1, 0, 3])
""".format(
**factory_common_args
),
)
add_docstr(
torch.tensor,
r"""
tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`.
.. warning::
When working with tensors prefer using :func:`torch.Tensor.clone`,
:func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for
readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to
``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)``
is equivalent to ``t.clone().detach().requires_grad_(True)``.
.. seealso::
:func:`torch.as_tensor` preserves autograd history and avoids copies where possible.
:func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array.
Args:
{data}
Keyword args:
{dtype}
device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
then the device of data is used. If None and data is not a tensor then
the result tensor is constructed on the CPU.
{requires_grad}
{pin_memory}
Example::
>>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
tensor([[ 0.1000, 1.2000],
[ 2.2000, 3.1000],
[ 4.9000, 5.2000]])
>>> torch.tensor([0, 1]) # Type inference on data
tensor([ 0, 1])
>>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
... dtype=torch.float64,
... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device
tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0')
>>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor
tensor(3.1416)
>>> torch.tensor([]) # Create an empty tensor (of size (0,))
tensor([])
""".format(
**factory_data_common_args
),
)
add_docstr(
torch.range,
r"""
range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
the gap between two values in the tensor.
.. math::
\text{out}_{i+1} = \text{out}_i + \text{step}.
"""
+ r"""
.. warning::
This function is deprecated and will be removed in a future release because its behavior is inconsistent with
Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end).
Args:
start (float): the starting value for the set of points. Default: ``0``.
end (float): the ending value for the set of points
step (float): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
{out}
{dtype} If `dtype` is not given, infer the data type from the other input
arguments. If any of `start`, `end`, or `step` are floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
{layout}
{device}
{requires_grad}
Example::
>>> torch.range(1, 4)
tensor([ 1., 2., 3., 4.])
>>> torch.range(1, 4, 0.5)
tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000])
""".format(
**factory_common_args
),
)
add_docstr(
torch.arange,
r"""
arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.
Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise adding a small epsilon to :attr:`end`
in such cases.
.. math::
\text{out}_{{i+1}} = \text{out}_{i} + \text{step}
"""
+ r"""
Args:
start (Number): the starting value for the set of points. Default: ``0``.
end (Number): the ending value for the set of points
step (Number): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
{out}
{dtype} If `dtype` is not given, infer the data type from the other input
arguments. If any of `start`, `end`, or `step` are floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
{layout}
{device}
{requires_grad}
Example::
>>> torch.arange(5)
tensor([ 0, 1, 2, 3, 4])
>>> torch.arange(1, 4)
tensor([ 1, 2, 3])
>>> torch.arange(1, 2.5, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])
""".format(
**factory_common_args
),
)
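
# Editorial sketch (not part of the original module): the note in the
# ``torch.arange`` docstring above suggests padding ``end`` with a small
# epsilon when ``step`` is non-integer. The helper name and the epsilon value
# are illustrative assumptions, not an official recipe.
def _arange_epsilon_sketch():
    import torch
    # Whether 1.0 is included depends on how (end - start) / step rounds;
    # padding ``end`` slightly makes the intended element count explicit.
    plain = torch.arange(0.0, 1.0, 0.1)
    padded = torch.arange(0.0, 1.0 + 1e-6, 0.1)
    assert padded.numel() == plain.numel() + 1  # 0.0, 0.1, ..., 1.0
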
add_docstr(
torch.ravel,
r"""
ravel(input) -> Tensor
Return a contiguous flattened tensor. A copy is made only if needed.
Args:
{input}
Example::
>>> t = torch.tensor([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> torch.ravel(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
""".format(
**common_args
),
)
add_docstr(
torch.remainder,
r"""
remainder(input, other, *, out=None) -> Tensor
Computes
`Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value
is less than that of :attr:`other`.
It may also be defined in terms of :func:`torch.div` as
.. code:: python
torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
.. note::
Complex inputs are not supported. In some cases, it is not mathematically
possible to satisfy the definition of a modulo operation with complex numbers.
See :func:`torch.fmod` for how division by zero is handled.
.. seealso::
:func:`torch.fmod` which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
This one is defined in terms of division rounding towards zero.
Args:
input (Tensor or Scalar): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
{out}
Example::
>>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([ 1., 0., 1., 1., 0., 1.])
>>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ])
""".format(
**common_args
),
)
add_docstr(
torch.renorm,
r"""
renorm(input, p, dim, maxnorm, *, out=None) -> Tensor
Returns a tensor where each sub-tensor of :attr:`input` along dimension
:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
than the value :attr:`maxnorm`
.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged
Args:
{input}
p (float): the power for the norm computation
dim (int): the dimension to slice over to get the sub-tensors
maxnorm (float): the maximum norm to keep each sub-tensor under
Keyword args:
{out}
Example::
>>> x = torch.ones(3, 3)
>>> x[1].fill_(2)
tensor([ 2., 2., 2.])
>>> x[2].fill_(3)
tensor([ 3., 3., 3.])
>>> x
tensor([[ 1., 1., 1.],
[ 2., 2., 2.],
[ 3., 3., 3.]])
>>> torch.renorm(x, 1, 0, 5)
tensor([[ 1.0000, 1.0000, 1.0000],
[ 1.6667, 1.6667, 1.6667],
[ 1.6667, 1.6667, 1.6667]])
""".format(
**common_args
),
)
add_docstr(
torch.reshape,
r"""
reshape(input, shape) -> Tensor
Returns a tensor with the same data and number of elements as :attr:`input`,
but with the specified shape. When possible, the returned tensor will be a view
of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
with compatible strides can be reshaped without copying, but you should not
depend on the copying vs. viewing behavior.
See :meth:`torch.Tensor.view` on when it is possible to return a view.
A single dimension may be -1, in which case it's inferred from the remaining
dimensions and the number of elements in :attr:`input`.
Args:
input (Tensor): the tensor to be reshaped
shape (tuple of int): the new shape
Example::
>>> a = torch.arange(4.)
>>> torch.reshape(a, (2, 2))
tensor([[ 0., 1.],
[ 2., 3.]])
>>> b = torch.tensor([[0, 1], [2, 3]])
>>> torch.reshape(b, (-1,))
tensor([ 0, 1, 2, 3])
""",
)
add_docstr(
torch.result_type,
r"""
result_type(tensor1, tensor2) -> dtype
Returns the :class:`torch.dtype` that would result from performing an arithmetic
operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
for more information on the type promotion logic.
Args:
tensor1 (Tensor or Number): an input tensor or number
tensor2 (Tensor or Number): an input tensor or number
Example::
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
torch.float32
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
torch.uint8
""",
)
add_docstr(
torch.row_stack,
r"""
row_stack(tensors, *, out=None) -> Tensor
Alias of :func:`torch.vstack`.
""",
)
add_docstr(
torch.round,
r"""
round(input, *, decimals=0, out=None) -> Tensor
Rounds elements of :attr:`input` to the nearest integer.
.. note::
This function implements the "round half to even" rule to
break ties when a number is equidistant from two
integers (e.g. `round(2.5)` is 2).
When the :attr:`decimals` argument is specified, the
algorithm used is similar to NumPy's `around`. This
algorithm is fast but inexact and it can easily
overflow for low precision dtypes.
For example, `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.
.. seealso::
:func:`torch.ceil`, which rounds up.
:func:`torch.floor`, which rounds down.
:func:`torch.trunc`, which rounds towards zero.
Args:
{input}
decimals (int): Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of positions
to the left of the decimal point.
Keyword args:
{out}
Example::
>>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
tensor([ 5., -2., 9., -8.])
>>> # Values equidistant from two integers are rounded towards
>>> # the nearest even value (zero is treated as even)
>>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
tensor([-0., 0., 2., 2.])
>>> # A positive decimals argument rounds to that decimal place
>>> torch.round(torch.tensor([0.1234567]), decimals=3)
tensor([0.1230])
>>> # A negative decimals argument rounds to the left of the decimal
>>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
tensor([1000.])
""".format(
**common_args
),
)
add_docstr(
torch.rsqrt,
r"""
rsqrt(input, *, out=None) -> Tensor
Returns a new tensor with the reciprocal of the square-root of each of
the elements of :attr:`input`.
.. math::
\text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.0370, 0.2970, 1.5420, -0.9105])
>>> torch.rsqrt(a)
tensor([ nan, 1.8351, 0.8053, nan])
""".format(
**common_args
),
)
add_docstr(
torch.scatter,
r"""
scatter(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
""",
)
add_docstr(
torch.scatter_add,
r"""
scatter_add(input, dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_add_`
""",
)
add_docstr(
torch.scatter_reduce,
r"""
scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
""",
)
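
# Editorial sketch (not part of the original module): the three wrappers above
# are the out-of-place counterparts of the ``torch.Tensor.scatter_*`` methods,
# so the original tensor is left untouched. The helper name is hypothetical and
# the snippet is only illustrative.
def _scatter_out_of_place_sketch():
    import torch
    base = torch.zeros(3, 5)
    index = torch.tensor([[0, 1, 2, 0]])
    src = torch.ones(1, 4)
    out = torch.scatter(base, 0, index, src)  # writes ``src`` into a copy of ``base``
    assert base.sum() == 0                    # ``base`` itself is unchanged
    assert out.sum() == src.sum()
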
add_docstr(
torch.select,
r"""
select(input, dim, index) -> Tensor
Slices the :attr:`input` tensor along the selected dimension at the given index.
This function returns a view of the original tensor with the given dimension removed.
Args:
{input}
dim (int): the dimension to slice
index (int): the index to select with
.. note::
:meth:`select` is equivalent to slicing. For example,
``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
""".format(
**common_args
),
)
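
# Editorial sketch (not part of the original module): the note above says that
# ``torch.select`` is equivalent to plain indexing along one dimension; this
# hypothetical helper spells the equivalence out.
def _select_indexing_sketch():
    import torch
    x = torch.arange(24).reshape(2, 3, 4)
    assert torch.equal(torch.select(x, 0, 1), x[1])        # drops dim 0
    assert torch.equal(torch.select(x, 2, 3), x[:, :, 3])  # drops dim 2
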
add_docstr(
torch.select_scatter,
r"""
select_scatter(input, src, dim, index) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index.
This function returns a tensor with fresh storage; it does not create a view.
Args:
{input}
src (Tensor): The tensor to embed into :attr:`input`
dim (int): the dimension to insert the slice into.
index (int): the index to select with
.. note::
:attr:`src` must be of the proper size in order to be embedded
into :attr:`input`. Specifically, it should have the same shape as
``torch.select(input, dim, index)``
Example::
>>> a = torch.zeros(2, 2)
>>> b = torch.ones(2)
>>> a.select_scatter(b, 0, 0)
tensor([[1., 1.],
[0., 0.]])
""".format(
**common_args
),
)
add_docstr(
torch.slice_scatter,
r"""
slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor
Embeds the values of the :attr:`src` tensor into :attr:`input` at the given
dimension.
This function returns a tensor with fresh storage; it does not create a view.
Args:
{input}
src (Tensor): The tensor to embed into :attr:`input`
dim (int): the dimension to insert the slice into
start (Optional[int]): the start index of where to insert the slice
end (Optional[int]): the end index of where to insert the slice
step (int): the step between elements of the slice
Example::
>>> a = torch.zeros(8, 8)
>>> b = torch.ones(2, 8)
>>> a.slice_scatter(b, start=6)
tensor([[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1.]])
>>> b = torch.ones(8, 2)
>>> a.slice_scatter(b, dim=1, start=2, end=6, step=2)
tensor([[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.],
[0., 0., 1., 0., 1., 0., 0., 0.]])
""".format(
**common_args
),
)
add_docstr(
torch.set_flush_denormal,
r"""
set_flush_denormal(mode) -> bool
Disables denormal floating numbers on CPU.
Returns ``True`` if your system supports flushing denormal numbers and it
successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal`
is only supported on x86 architectures supporting SSE3.
Args:
mode (bool): Controls whether to enable flush denormal mode or not
Example::
>>> torch.set_flush_denormal(True)
True
>>> torch.tensor([1e-323], dtype=torch.float64)
tensor([ 0.], dtype=torch.float64)
>>> torch.set_flush_denormal(False)
True
>>> torch.tensor([1e-323], dtype=torch.float64)
tensor(9.88131e-324 *
[ 1.0000], dtype=torch.float64)
""",
)
add_docstr(
torch.set_num_threads,
r"""
set_num_threads(int)
Sets the number of threads used for intraop parallelism on CPU.
.. warning::
To ensure that the correct number of threads is used, set_num_threads
must be called before running eager, JIT or autograd code.
""",
)
add_docstr(
torch.set_num_interop_threads,
r"""
set_num_interop_threads(int)
Sets the number of threads used for interop parallelism
(e.g. in JIT interpreter) on CPU.
.. warning::
Can only be called once and before any inter-op parallel work
is started (e.g. JIT execution).
""",
)
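
# Editorial sketch (not part of the original module): the warnings above say
# both thread counts must be configured before any parallel work starts, so a
# typical pattern is to set them once at program start-up. The values below are
# illustrative assumptions, not recommendations.
def _thread_configuration_sketch():
    import torch
    torch.set_num_threads(4)          # intra-op parallelism (e.g. within one matmul)
    torch.set_num_interop_threads(2)  # inter-op parallelism; may only be set once
    assert torch.get_num_threads() == 4
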
add_docstr(
torch.sigmoid,
r"""
sigmoid(input, *, out=None) -> Tensor
Alias for :func:`torch.special.expit`.
""",
)
add_docstr(
torch.logit,
r"""
logit(input, eps=None, *, out=None) -> Tensor
Alias for :func:`torch.special.logit`.
""",
)
add_docstr(
torch.sign,
r"""
sign(input, *, out=None) -> Tensor
Returns a new tensor with the signs of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([0.7, -1.2, 0., 2.3])
>>> a
tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
>>> torch.sign(a)
tensor([ 1., -1., 0., 1.])
""".format(
**common_args
),
)
add_docstr(
torch.signbit,
r"""
signbit(input, *, out=None) -> Tensor
Tests if each element of :attr:`input` has its sign bit set or not.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([0.7, -1.2, 0., 2.3])
>>> torch.signbit(a)
tensor([ False, True, False, False])
>>> a = torch.tensor([-0.0, 0.0])
>>> torch.signbit(a)
tensor([ True, False])
.. note::
signbit handles signed zeros, so negative zero (-0) returns True.
""".format(
**common_args
),
)
add_docstr(
torch.sgn,
r"""
sgn(input, *, out=None) -> Tensor
This function is an extension of :func:`torch.sign` to complex tensors.
For complex inputs, it computes a new tensor whose elements have
the same angles as the corresponding elements of :attr:`input` and
absolute value (i.e. magnitude) one; for non-complex inputs it is
equivalent to :func:`torch.sign`.
.. math::
\text{out}_{i} = \begin{cases}
0 & |\text{input}_i| = 0 \\
\frac{\text{input}_i}{|\text{input}_i|} & \text{otherwise}
\end{cases}
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j])
>>> t.sgn()
tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j])
""".format(
**common_args
),
)
add_docstr(
torch.sin,
r"""
sin(input, *, out=None) -> Tensor
Returns a new tensor with the sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sin(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.5461, 0.1347, -2.7266, -0.2746])
>>> torch.sin(a)
tensor([-0.5194, 0.1343, -0.4032, -0.2711])
""".format(
**common_args
),
)
add_docstr(
torch.sinc,
r"""
sinc(input, *, out=None) -> Tensor
Alias for :func:`torch.special.sinc`.
""",
)
add_docstr(
torch.sinh,
r"""
sinh(input, *, out=None) -> Tensor
Returns a new tensor with the hyperbolic sine of the elements of
:attr:`input`.
.. math::
\text{out}_{i} = \sinh(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
>>> torch.sinh(a)
tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
.. note::
When :attr:`input` is on the CPU, the implementation of torch.sinh may use
the Sleef library, which rounds very large results to infinity or negative
infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
""".format(
**common_args
),
)
add_docstr(
torch.sort,
r"""
sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
Sorts the elements of the :attr:`input` tensor along a given dimension
in ascending order by value.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`descending` is ``True`` then the elements are sorted in descending
order by value.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements.
A namedtuple of (values, indices) is returned, where the `values` are the
sorted values and `indices` are the indices of the elements in the original
`input` tensor.
Args:
{input}
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): makes the sorting routine stable, which guarantees that the order
of equivalent elements is preserved.
Keyword args:
out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
be optionally given to be used as output buffers
Example::
>>> x = torch.randn(3, 4)
>>> sorted, indices = torch.sort(x)
>>> sorted
tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
[-0.5793, 0.0061, 0.6058, 0.9497],
[-0.5071, 0.3343, 0.9553, 1.0960]])
>>> indices
tensor([[ 1, 0, 2, 3],
[ 3, 1, 0, 2],
[ 0, 3, 1, 2]])
>>> sorted, indices = torch.sort(x, 0)
>>> sorted
tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
[ 0.0608, 0.0061, 0.9497, 0.3343],
[ 0.6058, 0.9553, 1.0960, 2.3332]])
>>> indices
tensor([[ 2, 0, 0, 1],
[ 0, 1, 1, 2],
[ 1, 2, 2, 0]])
>>> x = torch.tensor([0, 1] * 9)
>>> x.sort()
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1]))
>>> x.sort(stable=True)
torch.return_types.sort(
values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17]))
""".format(
**common_args
),
)
add_docstr(
torch.argsort,
r"""
argsort(input, dim=-1, descending=False, stable=False) -> Tensor
Returns the indices that sort a tensor along a given dimension in ascending
order by value.
This is the second value returned by :meth:`torch.sort`. See its documentation
for the exact semantics of this method.
If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
the order of equivalent elements. If ``False``, the relative order of values
which compare equal is not guaranteed. Setting it to ``True`` is slower.
Args:
{input}
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
stable (bool, optional): controls the relative order of equivalent elements
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
[ 0.1598, 0.0788, -0.0745, -1.2700],
[ 1.2208, 1.0722, -0.7064, 1.2564],
[ 0.0669, -0.2318, -0.8229, -0.9280]])
>>> torch.argsort(a, dim=1)
tensor([[2, 0, 3, 1],
[3, 2, 1, 0],
[2, 1, 0, 3],
[3, 2, 1, 0]])
""".format(
**common_args
),
)
add_docstr(
torch.msort,
r"""
msort(input, *, out=None) -> Tensor
Sorts the elements of the :attr:`input` tensor along its first dimension
in ascending order by value.
.. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`.
See also :func:`torch.sort`.
Args:
{input}
Keyword args:
{out}
Example::
>>> t = torch.randn(3, 4)
>>> t
tensor([[-0.1321, 0.4370, -1.2631, -1.1289],
[-2.0527, -1.1250, 0.2275, 0.3077],
[-0.0881, -0.1259, -0.5495, 1.0284]])
>>> torch.msort(t)
tensor([[-2.0527, -1.1250, -1.2631, -1.1289],
[-0.1321, -0.1259, -0.5495, 0.3077],
[-0.0881, 0.4370, 0.2275, 1.0284]])
""".format(
**common_args
),
)
add_docstr(
torch.sparse_compressed_tensor,
r"""sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, """
r"""*, dtype=None, layout=None, device=None, requires_grad=False) -> Tensor
Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR,
CSC, BSR, or BSC - <sparse-compressed-docs>` with specified values at
the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse
matrix multiplication operations in Compressed Sparse format are
typically faster than that for sparse tensors in COO format. Make sure you
have a look at :ref:`the note on the data type of the indices
<sparse-compressed-docs>`.
Args:
compressed_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, compressed_dim_size + 1)``. The last element of
each batch is the number of non-zero elements or blocks. This
tensor encodes the index in ``values`` and ``plain_indices``
depending on where the given compressed dimension (row or
column) starts. Each successive number in the tensor
subtracted by the number before it denotes the number of
elements or blocks in a given compressed dimension.
plain_indices (array_like): Plain dimension (column or row)
co-ordinates of each element or block in values. (B+1)-dimensional
tensor with the same length as values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, or other types that
represent a (1+K)-dimensional or (1+2+K)-dimensional tensor
where ``K`` is the number of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
blocksize[1], *densesize)`` where ``blocksize[0] ==
blocksize[1] == 1`` for CSR and CSC formats. If not provided,
the size will be inferred as the minimum size big enough to
hold all non-zero elements or blocks.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
layout (:class:`torch.layout`, required): the desired layout of
returned tensor: :attr:`torch.sparse_csr`,
:attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or
:attr:`torch.sparse_bsc`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_tensor_type`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
{requires_grad}
Example::
>>> compressed_indices = [0, 2, 4]
>>> plain_indices = [0, 1, 0, 1]
>>> values = [1, 2, 3, 4]
>>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64),
... torch.tensor(plain_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr)
tensor(crow_indices=tensor([0, 2, 4]),
col_indices=tensor([0, 1, 0, 1]),
values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
dtype=torch.float64, layout=torch.sparse_csr)
""".format(
**factory_common_args
),
)
add_docstr(
torch.sparse_csr_tensor,
r"""
sparse_csr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False) -> Tensor
Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) <sparse-csr-docs>` with specified
values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations
in CSR format are typically faster than that for sparse tensors in COO format. Make sure you have a look
at :ref:`the note on the data type of the indices <sparse-csr-docs>`.
Args:
crow_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, nrows + 1)``. The last element of each batch
is the number of non-zeros. This tensor encodes the index in
values and col_indices depending on where the given row
starts. Each successive number in the tensor subtracted by the
number before it denotes the number of elements in a given
row.
col_indices (array_like): Column co-ordinates of each element in
values. (B+1)-dimensional tensor with the same length
as values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, or other types that
represent a (1+K)-dimensional tensor where ``K`` is the number
of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
not provided, the size will be inferred as the minimum size
big enough to hold all non-zero elements.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_tensor_type`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
{requires_grad}
Example::
>>> crow_indices = [0, 2, 4]
>>> col_indices = [0, 1, 0, 1]
>>> values = [1, 2, 3, 4]
>>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
... torch.tensor(col_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(crow_indices=tensor([0, 2, 4]),
col_indices=tensor([0, 1, 0, 1]),
values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
dtype=torch.float64, layout=torch.sparse_csr)
""".format(
**factory_common_args
),
)
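
# Editorial sketch (not part of the original module): one way to read the
# ``crow_indices`` description above is to build the documented example and
# compare it against its dense counterpart. The helper name is hypothetical.
def _csr_round_trip_sketch():
    import torch
    crow_indices = torch.tensor([0, 2, 4])  # row 0 owns entries 0:2, row 1 owns 2:4
    col_indices = torch.tensor([0, 1, 0, 1])
    values = torch.tensor([1.0, 2.0, 3.0, 4.0])
    csr = torch.sparse_csr_tensor(crow_indices, col_indices, values, size=(2, 2))
    assert torch.equal(csr.to_dense(), torch.tensor([[1.0, 2.0], [3.0, 4.0]]))
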
add_docstr(
torch.sparse_csc_tensor,
r"""
sparse_csc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False) -> Tensor
Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column)
<sparse-csc-docs>` with specified values at the given
:attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
multiplication operations in CSC format are typically faster than that
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-csc-docs>`.
Args:
ccol_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, ncols + 1)``. The last element of each batch
is the number of non-zeros. This tensor encodes the index in
values and row_indices depending on where the given column
starts. Each successive number in the tensor subtracted by the
number before it denotes the number of elements in a given
column.
row_indices (array_like): Row co-ordinates of each element in
values. (B+1)-dimensional tensor with the same length as
values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, or other types that
represent a (1+K)-dimensional tensor where ``K`` is the number
of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
not provided, the size will be inferred as the minimum size
big enough to hold all non-zero elements.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_tensor_type`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
{requires_grad}
Example::
>>> ccol_indices = [0, 2, 4]
>>> row_indices = [0, 1, 0, 1]
>>> values = [1, 2, 3, 4]
>>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
... torch.tensor(row_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(ccol_indices=tensor([0, 2, 4]),
row_indices=tensor([0, 1, 0, 1]),
values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
dtype=torch.float64, layout=torch.sparse_csc)
""".format(
**factory_common_args
),
)
add_docstr(
torch.sparse_bsr_tensor,
r"""
sparse_bsr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False) -> Tensor
Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)
<sparse-bsr-docs>` with specified 2-dimensional blocks at the given
:attr:`crow_indices` and :attr:`col_indices`. Sparse matrix
multiplication operations in BSR format are typically faster than that
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-bsr-docs>`.
Args:
crow_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, nrowblocks + 1)``. The last element of each
batch is the number of non-zeros. This tensor encodes the
block index in values and col_indices depending on where the
given row block starts. Each successive number in the tensor
subtracted by the number before it denotes the number of
blocks in a given row.
col_indices (array_like): Column block co-ordinates of each block
in values. (B+1)-dimensional tensor with the same length as
values.
values (array_like): Initial values for the tensor. Can be a list,
tuple, NumPy ``ndarray``, scalar, or other types that
represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
number of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
blocksize[1], *densesize)`` where ``blocksize ==
values.shape[1:3]``. If not provided, the size will be
inferred as the minimum size big enough to hold all non-zero
blocks.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_tensor_type`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
{requires_grad}
Example::
>>> crow_indices = [0, 1, 2]
>>> col_indices = [0, 1]
>>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
... torch.tensor(col_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(crow_indices=tensor([0, 1, 2]),
col_indices=tensor([0, 1]),
values=tensor([[[1., 2.],
[3., 4.]],
[[5., 6.],
[7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
layout=torch.sparse_bsr)
""".format(
**factory_common_args
),
)
add_docstr(
torch.sparse_bsc_tensor,
r"""
sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False) -> Tensor
Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse
Column) <sparse-bsc-docs>` with specified 2-dimensional blocks at the
given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
multiplication operations in BSC format are typically faster than that
for sparse tensors in COO format. Make sure you have a look at :ref:`the
note on the data type of the indices <sparse-bsc-docs>`.
Args:
ccol_indices (array_like): (B+1)-dimensional array of size
``(*batchsize, ncolblocks + 1)``. The last element of each
batch is the number of non-zeros. This tensor encodes the
index in values and row_indices depending on where the given
column starts. Each successive number in the tensor subtracted
by the number before it denotes the number of elements in a
given column.
row_indices (array_like): Row block co-ordinates of each block in
values. (B+1)-dimensional tensor with the same length
as values.
values (array_like): Initial blocks for the tensor. Can be a list,
tuple, NumPy ``ndarray``, or other types that
represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
number of dense dimensions.
size (list, tuple, :class:`torch.Size`, optional): Size of the
sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
blocksize[1], *densesize)`` If not provided, the size will be
inferred as the minimum size big enough to hold all non-zero
blocks.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. Default: if None, infers data type from
:attr:`values`.
device (:class:`torch.device`, optional): the desired device of
returned tensor. Default: if None, uses the current device
for the default tensor type (see
:func:`torch.set_default_tensor_type`). :attr:`device` will be
the CPU for CPU tensor types and the current CUDA device for
CUDA tensor types.
{requires_grad}
Example::
>>> ccol_indices = [0, 1, 2]
>>> row_indices = [0, 1]
>>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
... torch.tensor(row_indices, dtype=torch.int64),
... torch.tensor(values), dtype=torch.double)
tensor(ccol_indices=tensor([0, 1, 2]),
row_indices=tensor([0, 1]),
values=tensor([[[1., 2.],
[3., 4.]],
[[5., 6.],
[7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
layout=torch.sparse_bsc)
""".format(
**factory_common_args
),
)
add_docstr(
torch.sparse_coo_tensor,
r"""
sparse_coo_tensor(indices, values, size=None, *, dtype=None, device=None, requires_grad=False) -> Tensor
Constructs a :ref:`sparse tensor in COO(rdinate) format
<sparse-coo-docs>` with specified values at the given
:attr:`indices`.
.. note::
This function returns an :ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.
Args:
indices (array_like): Initial data for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
internally. The indices are the coordinates of the non-zero values in the matrix, and thus
should be two-dimensional where the first dimension is the number of tensor dimensions and
the second dimension is the number of non-zero values.
values (array_like): Initial values for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types.
size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
provided the size will be inferred as the minimum size big enough to hold all non-zero
elements.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if None, infers data type from :attr:`values`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
{requires_grad}
Example::
>>> i = torch.tensor([[0, 1, 1],
... [2, 0, 2]])
>>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
>>> torch.sparse_coo_tensor(i, v, [2, 4])
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
size=(2, 4), nnz=3, layout=torch.sparse_coo)
>>> torch.sparse_coo_tensor(i, v) # Shape inference
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
size=(2, 3), nnz=3, layout=torch.sparse_coo)
>>> torch.sparse_coo_tensor(i, v, [2, 4],
... dtype=torch.float64,
... device=torch.device('cuda:0'))
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
layout=torch.sparse_coo)
# Create an empty sparse tensor with the following invariants:
# 1. sparse_dim + dense_dim = len(SparseTensor.shape)
# 2. SparseTensor._indices().shape = (sparse_dim, nnz)
# 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
#
# For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
# sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
>>> torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
tensor(indices=tensor([], size=(1, 0)),
values=tensor([], size=(0,)),
size=(1,), nnz=0, layout=torch.sparse_coo)
# and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
# sparse_dim = 1
>>> torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
tensor(indices=tensor([], size=(1, 0)),
values=tensor([], size=(0, 2)),
size=(1, 2), nnz=0, layout=torch.sparse_coo)
.. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
""".format(
**factory_common_args
),
)
add_docstr(
torch.sqrt,
r"""
sqrt(input, *, out=None) -> Tensor
Returns a new tensor with the square-root of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sqrt{\text{input}_{i}}
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-2.0755, 1.0226, 0.0831, 0.4806])
>>> torch.sqrt(a)
tensor([ nan, 1.0112, 0.2883, 0.6933])
""".format(
**common_args
),
)
add_docstr(
torch.square,
r"""
square(input, *, out=None) -> Tensor
Returns a new tensor with the square of the elements of :attr:`input`.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-2.0755, 1.0226, 0.0831, 0.4806])
>>> torch.square(a)
tensor([ 4.3077, 1.0457, 0.0069, 0.2310])
""".format(
**common_args
),
)
add_docstr(
torch.squeeze,
r"""
squeeze(input, dim=None, *, out=None) -> Tensor
Returns a tensor with all the dimensions of :attr:`input` of size `1` removed.
For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `out` tensor
will be of shape: :math:`(A \times B \times C \times D)`.
When :attr:`dim` is given, a squeeze operation is done only in the given
dimension. If `input` is of shape: :math:`(A \times 1 \times B)`,
``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
will squeeze the tensor to the shape :math:`(A \times B)`.
.. note:: The returned tensor shares the storage with the input tensor,
so changing the contents of one will change the contents of the other.
.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
will also remove the batch dimension, which can lead to unexpected
errors.
Args:
{input}
dim (int, optional): if given, the input will be squeezed only in
this dimension
Keyword args:
{out}
Example::
>>> x = torch.zeros(2, 1, 2, 1, 2)
>>> x.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x)
>>> y.size()
torch.Size([2, 2, 2])
>>> y = torch.squeeze(x, 0)
>>> y.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x, 1)
>>> y.size()
torch.Size([2, 2, 1, 2])
""".format(
**common_args
),
)
add_docstr(
torch.std,
r"""
std(input, dim, unbiased, keepdim=False, *, out=None) -> Tensor
Calculates the standard deviation over the dimensions specified by :attr:`dim`.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample deviation is calculated, without any correction.
Args:
{input}
{dim}
Keyword args:
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
{keepdim}
{out}
.. function:: std(input, unbiased) -> Tensor
:noindex:
Calculates the standard deviation of all elements in the :attr:`input` tensor.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample deviation is calculated, without any correction.
Args:
{input}
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
Example::
>>> a = torch.tensor([[-0.8166, -1.3802, -0.3560]])
>>> torch.std(a, unbiased=False)
tensor(0.4188)
""".format(
**multi_dim_common
),
)
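
# Editorial sketch (not part of the original module): the docstring above keeps
# the effect of ``unbiased`` brief, so this hypothetical helper contrasts the
# two estimators on a tiny sample (division by N - 1 versus N).
def _std_bessel_sketch():
    import torch
    a = torch.tensor([1.0, 2.0, 3.0, 4.0])
    unbiased = torch.std(a, unbiased=True)   # divides the squared deviations by N - 1
    biased = torch.std(a, unbiased=False)    # divides the squared deviations by N
    assert unbiased > biased
    assert torch.isclose(unbiased, biased * (4.0 / 3.0) ** 0.5)
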
add_docstr(
torch.std_mean,
r"""
std_mean(input, dim, unbiased, keepdim=False, *, out=None) -> (Tensor, Tensor)
Calculates the standard deviation and mean over the dimensions specified by :attr:`dim`.
If :attr:`unbiased` is ``True``, Bessel's correction will be used to calculate
the standard deviation. Otherwise, the sample deviation is calculated, without
any correction.
Args:
{input}
{opt_dim}
Keyword args:
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
{keepdim}
{out}
Returns:
A tuple (std, mean) containing the standard deviation and mean.
.. function:: std_mean(input, unbiased) -> (Tensor, Tensor)
:noindex:
Calculates the standard deviation and mean of all elements in the :attr:`input`
tensor.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample deviation is calculated, without any correction.
Args:
{input}
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
Returns:
A tuple (std, mean) containing the standard deviation and mean.
Example::
>>> a = torch.tensor([[-0.8166, -1.3802, -0.3560]])
>>> torch.std_mean(a, unbiased=False)
(tensor(0.4188), tensor(-0.8509))
""".format(
**multi_dim_common
),
)
add_docstr(
torch.sub,
r"""
sub(input, other, *, alpha=1, out=None) -> Tensor
Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
.. math::
\text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i
"""
+ r"""
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
Args:
{input}
other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
Keyword args:
alpha (Number): the multiplier for :attr:`other`.
{out}
Example::
>>> a = torch.tensor((1, 2))
>>> b = torch.tensor((0, 1))
>>> torch.sub(a, b, alpha=2)
tensor([1, 0])
""".format(
**common_args
),
)
add_docstr(
torch.subtract,
r"""
subtract(input, other, *, alpha=1, out=None) -> Tensor
Alias for :func:`torch.sub`.
""",
)
add_docstr(
torch.sum,
r"""
sum(input, *, dtype=None) -> Tensor
Returns the sum of all elements in the :attr:`input` tensor.
Args:
{input}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.1133, -0.9567, 0.2958]])
>>> torch.sum(a)
tensor(-0.5475)
.. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
{keepdim_details}
Args:
{input}
{opt_dim}
{keepdim}
Keyword args:
{dtype}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
[-0.2993, 0.9138, 0.9337, -1.6864],
[ 0.1132, 0.7892, -0.1003, 0.5688],
[ 0.3637, -0.9906, -0.4752, -1.5197]])
>>> torch.sum(a, 1)
tensor([-0.4598, -0.1381, 1.3708, -2.6217])
>>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
>>> torch.sum(b, (2, 1))
tensor([ 435, 1335, 2235, 3135])
""".format(
**multi_dim_common
),
)
add_docstr(
torch.nansum,
r"""
nansum(input, *, dtype=None) -> Tensor
Returns the sum of all elements, treating Not a Number (NaN) values as zero.
Args:
{input}
Keyword args:
{dtype}
Example::
>>> a = torch.tensor([1., 2., float('nan'), 4.])
>>> torch.nansum(a)
tensor(7.)
.. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
:noindex:
Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`, treating Not a Number (NaN) values as zero.
If :attr:`dim` is a list of dimensions, reduce over all of them.
{keepdim_details}
Args:
{input}
{opt_dim}
{keepdim}
Keyword args:
{dtype}
Example::
>>> torch.nansum(torch.tensor([1., float("nan")]))
tensor(1.)
>>> a = torch.tensor([[1, 2], [3., float("nan")]])
>>> torch.nansum(a)
tensor(6.)
>>> torch.nansum(a, dim=0)
tensor([4., 2.])
>>> torch.nansum(a, dim=1)
tensor([3., 3.])
""".format(
**multi_dim_common
),
)
add_docstr(
torch.svd,
r"""
svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor)
Computes the singular value decomposition of either a matrix or batch of
matrices :attr:`input`. The singular value decomposition is represented as a
namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`,
where :math:`V^{\text{H}}` is the transpose of `V` for real inputs,
and the conjugate transpose of `V` for complex inputs.
If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also
batched with the same batch dimensions as :attr:`input`.
If :attr:`some` is `True` (default), the method returns the reduced singular
value decomposition. In this case, if the last two dimensions of :attr:`input` are
`m` and `n`, then the returned `U` and `V` matrices will contain only
`min(n, m)` orthonormal columns.
If :attr:`compute_uv` is `False`, the returned `U` and `V` will be
zero-filled matrices of shape `(m, m)` and `(n, n)`
respectively, on the same device as :attr:`input`. The argument :attr:`some`
has no effect when :attr:`compute_uv` is `False`.
Supports :attr:`input` of float, double, cfloat and cdouble data types.
The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will
always be real-valued, even if :attr:`input` is complex.
.. warning::
:func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd`
and will be removed in a future PyTorch release.
``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with
.. code:: python
U, S, Vh = torch.linalg.svd(A, full_matrices=not some)
V = Vh.mH
``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with
.. code:: python
S = torch.linalg.svdvals(A)
.. note:: Differences with :func:`torch.linalg.svd`:
* :attr:`some` is the opposite of
:func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that
default value for both is `True`, so the default behavior is
effectively the opposite.
* :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns
`Vh`, that is, :math:`V^{\text{H}}`.
* If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled
tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns
empty tensors.
.. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
then the singular values of each matrix in the batch are returned in descending order.
.. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`.
.. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]`
and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors
can be arbitrary bases of the corresponding subspaces.
.. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd`
(a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously,
on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243
and later, and MAGMA's routine `gesdd` on earlier versions of CUDA.
.. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will
be represented as a column-major matrix (i.e. Fortran-contiguous).
.. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not
have zero nor repeated singular values.
.. warning:: If the distance between any two singular values is close to zero, the gradients with respect to
`U` and `V` will be numerically unstable, as they depend on
:math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix
has small singular values, as these gradients also depend on `S⁻¹`.
.. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique,
as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column.
The same happens when :attr:`input` has repeated singular values, where one may multiply
the columns of the spanning subspace in `U` and `V` by a rotation matrix
and `the resulting vectors will span the same subspace`_.
Different platforms, like NumPy, or inputs on different device types,
may produce different `U` and `V` tensors.
Args:
input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more
batch dimensions consisting of `(m, n)` matrices.
some (bool, optional): controls whether to compute the reduced or full decomposition, and
consequently, the shape of returned `U` and `V`. Default: `True`.
compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`.
Keyword args:
out (tuple, optional): the output tuple of tensors
Example::
>>> a = torch.randn(5, 3)
>>> a
tensor([[ 0.2364, -0.7752, 0.6372],
[ 1.7201, 0.7394, -0.0504],
[-0.3371, -1.0584, 0.5296],
[ 0.3550, -0.4022, 1.5569],
[ 0.2445, -0.0158, 1.1414]])
>>> u, s, v = torch.svd(a)
>>> u
tensor([[ 0.4027, 0.0287, 0.5434],
[-0.1946, 0.8833, 0.3679],
[ 0.4296, -0.2890, 0.5261],
[ 0.6604, 0.2717, -0.2618],
[ 0.4234, 0.2481, -0.4733]])
>>> s
tensor([2.3289, 2.0315, 0.7806])
>>> v
tensor([[-0.0199, 0.8766, 0.4809],
[-0.5080, 0.4054, -0.7600],
[ 0.8611, 0.2594, -0.4373]])
>>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
tensor(8.6531e-07)
>>> a_big = torch.randn(7, 5, 3)
>>> u, s, v = torch.svd(a_big)
>>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT))
tensor(2.6503e-06)
.. _the resulting vectors will span the same subspace:
https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
""",
)
add_docstr(
torch.symeig,
r"""
symeig(input, eigenvectors=False, upper=True, *, out=None) -> (Tensor, Tensor)
This function returns eigenvalues and eigenvectors
of a real symmetric or complex Hermitian matrix :attr:`input` or a batch thereof,
represented by a namedtuple (eigenvalues, eigenvectors).
This function calculates all eigenvalues (and vectors) of :attr:`input`
such that :math:`\text{input} = V \text{diag}(e) V^T`.
The boolean argument :attr:`eigenvectors` defines computation of
both eigenvectors and eigenvalues or eigenvalues only.
If it is ``False``, only eigenvalues are computed. If it is ``True``,
both eigenvalues and eigenvectors are computed.
Since the input matrix :attr:`input` is supposed to be symmetric or Hermitian,
only the upper triangular portion is used by default.
If :attr:`upper` is ``False``, then lower triangular portion is used.
.. warning::
:func:`torch.symeig` is deprecated in favor of :func:`torch.linalg.eigh`
and will be removed in a future PyTorch release. The default behavior has changed
from using the upper triangular portion of the matrix by default to using the
lower triangular portion.
``L, _ = torch.symeig(A, upper=upper)`` should be replaced with
.. code :: python
UPLO = "U" if upper else "L"
L = torch.linalg.eigvalsh(A, UPLO=UPLO)
``L, V = torch.symeig(A, eigenvectors=True, upper=upper)`` should be replaced with
.. code :: python
UPLO = "U" if upper else "L"
L, V = torch.linalg.eigh(A, UPLO=UPLO)
.. note:: The eigenvalues are returned in ascending order. If :attr:`input` is a batch of matrices,
then the eigenvalues of each matrix in the batch are returned in ascending order.
.. note:: Irrespective of the original strides, the returned matrix `V` will
be transposed, i.e. with strides `V.contiguous().mT.stride()`.
.. warning:: Extra care needs to be taken when backpropagating through the outputs. Such
an operation is only stable when all eigenvalues are distinct and becomes
less stable the smaller :math:`\min_{i \neq j} |\lambda_i - \lambda_j|` is.
Args:
input (Tensor): the input tensor of size :math:`(*, n, n)` where `*` is zero or more
batch dimensions consisting of symmetric or Hermitian matrices.
eigenvectors (bool, optional): controls whether eigenvectors have to be computed
upper (bool, optional): controls whether to consider upper-triangular or lower-triangular region
Keyword args:
out (tuple, optional): the output tuple of (Tensor, Tensor)
Returns:
(Tensor, Tensor): A namedtuple (eigenvalues, eigenvectors) containing
- **eigenvalues** (*Tensor*): Shape :math:`(*, n)`. The eigenvalues in ascending order.
- **eigenvectors** (*Tensor*): Shape :math:`(*, n, n)`.
If ``eigenvectors=False``, it's an empty tensor.
Otherwise, this tensor contains the orthonormal eigenvectors of the ``input``.
Examples::
>>> a = torch.randn(5, 5)
>>> a = a + a.t() # To make a symmetric
>>> a
tensor([[-5.7827, 4.4559, -0.2344, -1.7123, -1.8330],
[ 4.4559, 1.4250, -2.8636, -3.2100, -0.1798],
[-0.2344, -2.8636, 1.7112, -5.5785, 7.1988],
[-1.7123, -3.2100, -5.5785, -2.6227, 3.1036],
[-1.8330, -0.1798, 7.1988, 3.1036, -5.1453]])
>>> e, v = torch.symeig(a, eigenvectors=True)
>>> e
tensor([-13.7012, -7.7497, -2.3163, 5.2477, 8.1050])
>>> v
tensor([[ 0.1643, 0.9034, -0.0291, 0.3508, 0.1817],
[-0.2417, -0.3071, -0.5081, 0.6534, 0.4026],
[-0.5176, 0.1223, -0.0220, 0.3295, -0.7798],
[-0.4850, 0.2695, -0.5773, -0.5840, 0.1337],
[ 0.6415, -0.0447, -0.6381, -0.0193, -0.4230]])
>>> a_big = torch.randn(5, 2, 2)
>>> a_big = a_big + a_big.mT # To make a_big symmetric
>>> e, v = a_big.symeig(eigenvectors=True)
>>> torch.allclose(torch.matmul(v, torch.matmul(e.diag_embed(), v.mT)), a_big)
True
""",
)
add_docstr(
torch.t,
r"""
t(input) -> Tensor
Expects :attr:`input` to be a tensor with at most 2 dimensions, and transposes dimensions 0
and 1.
0-D and 1-D tensors are returned as is. When input is a 2-D tensor this
is equivalent to ``transpose(input, 0, 1)``.
Args:
{input}
Example::
>>> x = torch.randn(())
>>> x
tensor(0.1995)
>>> torch.t(x)
tensor(0.1995)
>>> x = torch.randn(3)
>>> x
tensor([ 2.4320, -0.4608, 0.7702])
>>> torch.t(x)
tensor([ 2.4320, -0.4608, 0.7702])
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.4875, 0.9158, -0.5872],
[ 0.3938, -0.6929, 0.6932]])
>>> torch.t(x)
tensor([[ 0.4875, 0.3938],
[ 0.9158, -0.6929],
[-0.5872, 0.6932]])
See also :func:`torch.transpose`.
""".format(
**common_args
),
)
add_docstr(
torch.flip,
r"""
flip(input, dims) -> Tensor
Reverse the order of an n-D tensor along the given axes in :attr:`dims`.
.. note::
`torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
`torch.flip` is expected to be slower than `np.flip`.
Args:
{input}
dims (a list or tuple): axis to flip on
Example::
>>> x = torch.arange(8).view(2, 2, 2)
>>> x
tensor([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]]])
>>> torch.flip(x, [0, 1])
tensor([[[ 6, 7],
[ 4, 5]],
[[ 2, 3],
[ 0, 1]]])
""".format(
**common_args
),
)
add_docstr(
torch.fliplr,
r"""
fliplr(input) -> Tensor
Flip tensor in the left/right direction, returning a new tensor.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Note:
Requires the tensor to be at least 2-D.
.. note::
`torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`,
which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
`torch.fliplr` is expected to be slower than `np.fliplr`.
Args:
input (Tensor): Must be at least 2-dimensional.
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.fliplr(x)
tensor([[1, 0],
[3, 2]])
""".format(
**common_args
),
)
add_docstr(
torch.flipud,
r"""
flipud(input) -> Tensor
Flip tensor in the up/down direction, returning a new tensor.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Note:
Requires the tensor to be at least 1-D.
.. note::
`torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`,
which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
`torch.flipud` is expected to be slower than `np.flipud`.
Args:
input (Tensor): Must be at least 1-dimensional.
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.flipud(x)
tensor([[2, 3],
[0, 1]])
""".format(
**common_args
),
)
add_docstr(
torch.roll,
r"""
roll(input, shifts, dims=None) -> Tensor
Roll the tensor :attr:`input` along the given dimension(s). Elements that are
shifted beyond the last position are re-introduced at the first position. If
:attr:`dims` is `None`, the tensor will be flattened before rolling and then
restored to the original shape.
Args:
{input}
shifts (int or tuple of ints): The number of places by which the elements
of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
the same size, and each dimension will be rolled by the corresponding
value.
dims (int or tuple of ints): Axis along which to roll
Example::
>>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
>>> x
tensor([[1, 2],
[3, 4],
[5, 6],
[7, 8]])
>>> torch.roll(x, 1)
tensor([[8, 1],
[2, 3],
[4, 5],
[6, 7]])
>>> torch.roll(x, 1, 0)
tensor([[7, 8],
[1, 2],
[3, 4],
[5, 6]])
>>> torch.roll(x, -1, 0)
tensor([[3, 4],
[5, 6],
[7, 8],
[1, 2]])
>>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
tensor([[6, 5],
[8, 7],
[2, 1],
[4, 3]])
""".format(
**common_args
),
)
add_docstr(
torch.rot90,
r"""
rot90(input, k, dims) -> Tensor
Rotate an n-D tensor by 90 degrees in the plane specified by :attr:`dims`.
Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0.
Args:
{input}
k (int): number of times to rotate
dims (a list or tuple): axis to rotate
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.rot90(x, 1, [0, 1])
tensor([[1, 3],
[0, 2]])
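>>> # With k < 0 the rotation goes from the second axis towards the first (the opposite direction)
>>> torch.rot90(x, -1, [0, 1])
tensor([[2, 0],
[3, 1]])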
>>> x = torch.arange(8).view(2, 2, 2)
>>> x
tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> torch.rot90(x, 1, [1, 2])
tensor([[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]])
""".format(
**common_args
),
)
add_docstr(
torch.take,
r"""
take(input, index) -> Tensor
Returns a new tensor with the elements of :attr:`input` at the given indices.
The input tensor is treated as if it were viewed as a 1-D tensor. The result
takes the same shape as the indices.
Args:
{input}
index (LongTensor): the indices into tensor
Example::
>>> src = torch.tensor([[4, 3, 5],
... [6, 7, 8]])
>>> torch.take(src, torch.tensor([0, 2, 5]))
tensor([ 4, 5, 8])
""".format(
**common_args
),
)
add_docstr(
torch.take_along_dim,
r"""
take_along_dim(input, indices, dim, *, out=None) -> Tensor
Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`.
Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`,
are designed to work with this function. See the examples below.
.. note::
This function is similar to NumPy's `take_along_axis`.
See also :func:`torch.gather`.
Args:
{input}
indices (tensor): the indices into :attr:`input`. Must have long dtype.
dim (int): dimension to select along.
Keyword args:
{out}
Example::
>>> t = torch.tensor([[10, 30, 20], [60, 40, 50]])
>>> max_idx = torch.argmax(t)
>>> torch.take_along_dim(t, max_idx)
tensor([60])
>>> sorted_idx = torch.argsort(t, dim=1)
>>> torch.take_along_dim(t, sorted_idx, dim=1)
tensor([[10, 20, 30],
[40, 50, 60]])
""".format(
**common_args
),
)
add_docstr(
torch.tan,
r"""
tan(input, *, out=None) -> Tensor
Returns a new tensor with the tangent of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tan(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-1.2027, -1.7687, 0.4412, -1.3856])
>>> torch.tan(a)
tensor([-2.5930, 4.9859, 0.4722, -5.3366])
""".format(
**common_args
),
)
add_docstr(
torch.tanh,
r"""
tanh(input, *, out=None) -> Tensor
Returns a new tensor with the hyperbolic tangent of the elements
of :attr:`input`.
.. math::
\text{out}_{i} = \tanh(\text{input}_{i})
"""
+ r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
>>> torch.tanh(a)
tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
""".format(
**common_args
),
)
add_docstr(
torch.topk,
r"""
topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor)
Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
a given dimension.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`largest` is ``False`` then the `k` smallest elements are returned.
A namedtuple of `(values, indices)` is returned with the `values` and
`indices` of the largest `k` elements of each row of the `input` tensor in the
given dimension `dim`.
The boolean option :attr:`sorted`, if ``True``, will make sure that the returned
`k` elements are themselves sorted.
Args:
{input}
k (int): the k in "top-k"
dim (int, optional): the dimension to sort along
largest (bool, optional): controls whether to return largest or
smallest elements
sorted (bool, optional): controls whether to return the elements
in sorted order
Keyword args:
out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
optionally given to be used as output buffers
Example::
>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1., 2., 3., 4., 5.])
>>> torch.topk(x, 3)
torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
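>>> # With largest=False the k smallest elements are returned instead
>>> torch.topk(x, 3, largest=False)
torch.return_types.topk(values=tensor([1., 2., 3.]), indices=tensor([0, 1, 2]))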
""".format(
**common_args
),
)
add_docstr(
torch.trace,
r"""
trace(input) -> Tensor
Returns the sum of the elements of the diagonal of the input 2-D matrix.
Example::
>>> x = torch.arange(1., 10.).view(3, 3)
>>> x
tensor([[ 1., 2., 3.],
[ 4., 5., 6.],
[ 7., 8., 9.]])
>>> torch.trace(x)
tensor(15.)
""",
)
add_docstr(
torch.transpose,
r"""
transpose(input, dim0, dim1) -> Tensor
Returns a tensor that is a transposed version of :attr:`input`.
The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
If :attr:`input` is a strided tensor then the resulting :attr:`out`
tensor shares its underlying storage with the :attr:`input` tensor, so
changing the content of one would change the content of the other.
If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
resulting :attr:`out` tensor *does not* share the underlying storage
with the :attr:`input` tensor.
Args:
{input}
dim0 (int): the first dimension to be transposed
dim1 (int): the second dimension to be transposed
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 1.0028, -0.9893, 0.5809],
[-0.1669, 0.7299, 0.4942]])
>>> torch.transpose(x, 0, 1)
tensor([[ 1.0028, -0.1669],
[-0.9893, 0.7299],
[ 0.5809, 0.4942]])
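>>> # For strided tensors the result is a view: writing through it updates the original
>>> x = torch.zeros(2, 3)
>>> y = torch.transpose(x, 0, 1)
>>> y[2, 0] = 1.
>>> x
tensor([[0., 0., 1.],
[0., 0., 0.]])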
See also :func:`torch.t`.
""".format(
**common_args
),
)
add_docstr(
torch.triangular_solve,
r"""
triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor)
Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A`
and multiple right-hand sides :math:`b`.
In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular
(or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal.
`torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
batches of 2D matrices. If the inputs are batches, then batched
outputs `X` are returned.
If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and
:attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned,
the result may contain `NaN` s.
Supports input of float, double, cfloat and cdouble data types.
.. warning::
:func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular`
and will be removed in a future PyTorch release.
:func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a
copy of one of the inputs.
``X = torch.triangular_solve(B, A).solution`` should be replaced with
.. code:: python
X = torch.linalg.solve_triangular(A, B)
Args:
b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
:math:`*` is zero or more batch dimensions
A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
where :math:`*` is zero or more batch dimensions
upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``.
transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``,
and `op(A) = A` if it is ``False``. Default: ``False``.
unitriangular (bool, optional): whether :math:`A` is unit triangular.
If True, the diagonal elements of :math:`A` are assumed to be
1 and not referenced from :math:`A`. Default: ``False``.
Keyword args:
out ((Tensor, Tensor), optional): tuple of two tensors to write
the output to. Ignored if `None`. Default: `None`.
Returns:
A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
(or whatever variant of the system of equations, depending on the keyword arguments.)
Examples::
>>> A = torch.randn(2, 2).triu()
>>> A
tensor([[ 1.1527, -1.0753],
[ 0.0000, 0.7986]])
>>> b = torch.randn(2, 3)
>>> b
tensor([[-0.0210, 2.3513, -1.5492],
[ 1.5429, 0.7403, -1.0243]])
>>> torch.triangular_solve(b, A)
torch.return_types.triangular_solve(
solution=tensor([[ 1.7841, 2.9046, -2.5405],
[ 1.9320, 0.9270, -1.2826]]),
cloned_coefficient=tensor([[ 1.1527, -1.0753],
[ 0.0000, 0.7986]]))
""",
)
add_docstr(
torch.tril,
r"""
tril(input, diagonal=0, *, out=None) -> Tensor
Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
:attr:`input`. The other elements of the result tensor :attr:`out` are set to 0.
The lower triangular part of the matrix is defined as the elements on and
below the diagonal.
The argument :attr:`diagonal` controls which diagonal to consider. If
:attr:`diagonal` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
"""
+ r"""
Args:
{input}
diagonal (int, optional): the diagonal to consider
Keyword args:
{out}
Example::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0813, -0.8619, 0.7105],
[ 0.0935, 0.1380, 2.2112],
[-0.3409, -0.9828, 0.0289]])
>>> torch.tril(a)
tensor([[-1.0813, 0.0000, 0.0000],
[ 0.0935, 0.1380, 0.0000],
[-0.3409, -0.9828, 0.0289]])
>>> b = torch.randn(4, 6)
>>> b
tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461],
[ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145],
[ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864],
[-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]])
>>> torch.tril(b, diagonal=1)
tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000],
[ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000],
[-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]])
>>> torch.tril(b, diagonal=-1)
tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000],
[-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]])
""".format(
**common_args
),
)
# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
# as common args.
add_docstr(
torch.tril_indices,
r"""
tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
Returns the indices of the lower triangular part of a :attr:`row`-by-
:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
coordinates of all indices and the second row contains column coordinates.
Indices are ordered based on rows and then columns.
The lower triangular part of the matrix is defined as the elements on and
below the diagonal.
The argument :attr:`offset` controls which diagonal to consider. If
:attr:`offset` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
.. note::
When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
prevent overflow during calculation.
"""
+ r"""
Args:
row (``int``): number of rows in the 2-D matrix.
col (``int``): number of columns in the 2-D matrix.
offset (``int``): diagonal offset from the main diagonal.
Default: if not provided, 0.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, ``torch.long``.
{device}
layout (:class:`torch.layout`, optional): currently only ``torch.strided`` is supported.
Example::
>>> a = torch.tril_indices(3, 3)
>>> a
tensor([[0, 1, 1, 2, 2, 2],
[0, 0, 1, 0, 1, 2]])
>>> a = torch.tril_indices(4, 3, -1)
>>> a
tensor([[1, 2, 2, 3, 3, 3],
[0, 0, 1, 0, 1, 2]])
>>> a = torch.tril_indices(4, 3, 1)
>>> a
tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
[0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
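>>> # The two returned rows can be used directly as coordinates to index a matrix
>>> m = torch.arange(9).view(3, 3)
>>> idx = torch.tril_indices(3, 3)
>>> m[idx[0], idx[1]]
tensor([0, 3, 4, 6, 7, 8])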
""".format(
**factory_common_args
),
)
add_docstr(
torch.triu,
r"""
triu(input, diagonal=0, *, out=None) -> Tensor
Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
:attr:`input`. The other elements of the result tensor :attr:`out` are set to 0.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
The argument :attr:`diagonal` controls which diagonal to consider. If
:attr:`diagonal` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
"""
+ r"""
Args:
{input}
diagonal (int, optional): the diagonal to consider
Keyword args:
{out}
Example::
>>> a = torch.randn(3, 3)
>>> a
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.2072, -1.0680, 0.6602],
[ 0.3480, -0.5211, -0.4573]])
>>> torch.triu(a)
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.0000, -1.0680, 0.6602],
[ 0.0000, 0.0000, -0.4573]])
>>> torch.triu(a, diagonal=1)
tensor([[ 0.0000, 0.5207, 2.0049],
[ 0.0000, 0.0000, 0.6602],
[ 0.0000, 0.0000, 0.0000]])
>>> torch.triu(a, diagonal=-1)
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.2072, -1.0680, 0.6602],
[ 0.0000, -0.5211, -0.4573]])
>>> b = torch.randn(4, 6)
>>> b
tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
[-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]])
>>> torch.triu(b, diagonal=1)
tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]])
>>> torch.triu(b, diagonal=-1)
tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
[ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]])
""".format(
**common_args
),
)
# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
# as common args.
add_docstr(
torch.triu_indices,
r"""
triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
Returns the indices of the upper triangular part of a :attr:`row` by
:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
coordinates of all indices and the second row contains column coordinates.
Indices are ordered based on rows and then columns.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
The argument :attr:`offset` controls which diagonal to consider. If
:attr:`offset` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
.. note::
When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
prevent overflow during calculation.
"""
+ r"""
Args:
row (``int``): number of rows in the 2-D matrix.
col (``int``): number of columns in the 2-D matrix.
offset (``int``): diagonal offset from the main diagonal.
Default: if not provided, 0.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, ``torch.long``.
{device}
layout (:class:`torch.layout`, optional): currently only ``torch.strided`` is supported.
Example::
>>> a = torch.triu_indices(3, 3)
>>> a
tensor([[0, 0, 0, 1, 1, 2],
[0, 1, 2, 1, 2, 2]])
>>> a = torch.triu_indices(4, 3, -1)
>>> a
tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
[0, 1, 2, 0, 1, 2, 1, 2, 2]])
>>> a = torch.triu_indices(4, 3, 1)
>>> a
tensor([[0, 0, 1],
[1, 2, 2]])
""".format(
**factory_common_args
),
)
add_docstr(
torch.true_divide,
r"""
true_divide(dividend, divisor, *, out) -> Tensor
Alias for :func:`torch.div` with ``rounding_mode=None``.
""",
)
add_docstr(
torch.trunc,
r"""
trunc(input, *, out=None) -> Tensor
Returns a new tensor with the truncated integer values of
the elements of :attr:`input`.
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
>>> torch.trunc(a)
tensor([ 3., 0., -0., -0.])
""".format(
**common_args
),
)
add_docstr(
torch.fake_quantize_per_tensor_affine,
r"""
fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
.. math::
\text{output} = (
min(
\text{quant\_max},
max(
\text{quant\_min},
\text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
)
) - \text{zero\_point}
) \times \text{scale}
Args:
input (Tensor): the input value(s), in ``torch.float32``.
scale (double or Tensor): quantization scale
zero_point (int64 or Tensor): quantization zero_point
quant_min (int64): lower bound of the quantized domain
quant_max (int64): upper bound of the quantized domain
Returns:
Tensor: A newly fake_quantized tensor
Example::
>>> x = torch.randn(4)
>>> x
tensor([ 0.0552, 0.9730, 0.3973, -1.0780])
>>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
tensor([0.1000, 1.0000, 0.4000, 0.0000])
>>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
tensor([0.6000, 0.4000, 0.0000, 0.0000])
""",
)
add_docstr(
torch.fake_quantize_per_channel_affine,
r"""
fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor
Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`,
:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`.
.. math::
\text{output} = (
min(
\text{quant\_max},
max(
\text{quant\_min},
\text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
)
) - \text{zero\_point}
) \times \text{scale}
Args:
input (Tensor): the input value(s), in ``torch.float32``.
scale (Tensor): quantization scale, per channel
zero_point (Tensor): quantization zero_point, per channel
axis (int32): channel axis
quant_min (int64): lower bound of the quantized domain
quant_max (int64): upper bound of the quantized domain
Returns:
Tensor: A newly fake_quantized per channel tensor
Example::
>>> x = torch.randn(2, 2, 2)
>>> x
tensor([[[-0.2525, -0.0466],
[ 0.3491, -0.2168]],
[[-0.5906, 1.6258],
[ 0.6444, -0.0542]]])
>>> scales = (torch.randn(2) + 1) * 0.05
>>> scales
tensor([0.0475, 0.0486])
>>> zero_points = torch.zeros(2).to(torch.long)
>>> zero_points
tensor([0, 0])
>>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
tensor([[[0.0000, 0.0000],
[0.3405, 0.0000]],
[[0.0000, 1.6134],
[0.6323, 0.0000]]])
""",
)
add_docstr(
torch.fix,
r"""
fix(input, *, out=None) -> Tensor
Alias for :func:`torch.trunc`
""",
)
add_docstr(
torch.unsqueeze,
r"""
unsqueeze(input, dim) -> Tensor
Returns a new tensor with a dimension of size one inserted at the
specified position.
The returned tensor shares the same underlying data with this tensor.
A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
applied at :attr:`dim` = ``dim + input.dim() + 1``.
Args:
{input}
dim (int): the index at which to insert the singleton dimension
Example::
>>> x = torch.tensor([1, 2, 3, 4])
>>> torch.unsqueeze(x, 0)
tensor([[ 1, 2, 3, 4]])
>>> torch.unsqueeze(x, 1)
tensor([[ 1],
[ 2],
[ 3],
[ 4]])
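>>> # A negative dim counts from the end; here -1 is equivalent to dim=1
>>> torch.unsqueeze(x, -1)
tensor([[ 1],
[ 2],
[ 3],
[ 4]])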
""".format(
**common_args
),
)
add_docstr(
torch.var,
r"""
var(input, dim, unbiased, keepdim=False, *, out=None) -> Tensor
Calculates the variance over the dimensions specified by :attr:`dim`.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample variance is calculated, without any correction.
Args:
{input}
{opt_dim}
Keyword args:
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
{keepdim}
{out}
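Example::
>>> # Sample variance (Bessel's correction, the default) along dim 1
>>> a = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
>>> torch.var(a, dim=1, keepdim=True)
tensor([[1.6667]])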
.. function:: var(input, unbiased) -> Tensor
:noindex:
Calculates the variance of all elements in the :attr:`input` tensor.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample variance is calculated, without any correction.
Args:
{input}
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
Example::
>>> a = torch.tensor([[-0.8166, -1.3802, -0.3560]])
>>> torch.var(a, unbiased=False)
tensor(0.1754)
""".format(
**multi_dim_common
),
)
add_docstr(
torch.var_mean,
r"""
var_mean(input, dim, unbiased, keepdim=False, *, out=None) -> (Tensor, Tensor)
Calculates the variance and mean over the dimensions specified by :attr:`dim`.
If :attr:`unbiased` is ``True``, Bessel's correction will be used to calculate
the variance. Otherwise, the sample variance is calculated, without any
correction.
Args:
{input}
{opt_dim}
Keyword args:
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
{keepdim}
{out}
Returns:
A tuple (var, mean) containing the variance and mean.
.. function:: var_mean(input, unbiased) -> (Tensor, Tensor)
:noindex:
Calculates the variance and mean of all elements in the :attr:`input`
tensor.
If :attr:`unbiased` is ``True``, Bessel's correction will be used.
Otherwise, the sample variance is calculated, without any correction.
Args:
{input}
unbiased (bool): whether to use Bessel's correction (:math:`\delta N = 1`).
Returns:
A tuple (var, mean) containing the variance and mean.
Example::
>>> a = torch.tensor([[-0.8166, -1.3802, -0.3560]])
>>> torch.var_mean(a, unbiased=False)
(tensor(0.1754), tensor(-0.8509))
""".format(
**multi_dim_common
),
)
add_docstr(
torch.zeros,
r"""
zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `0`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.zeros(2, 3)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> torch.zeros(5)
tensor([ 0., 0., 0., 0., 0.])
""".format(
**factory_common_args
),
)
add_docstr(
torch.zeros_like,
r"""
zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor filled with the scalar value `0`, with the same size as
:attr:`input`. ``torch.zeros_like(input)`` is equivalent to
``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
.. warning::
As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
the old ``torch.zeros_like(input, out=output)`` is equivalent to
``torch.zeros(input.size(), out=output)``.
Args:
{input}
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
Example::
>>> input = torch.empty(2, 3)
>>> torch.zeros_like(input)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
""".format(
**factory_like_common_args
),
)
add_docstr(
torch.empty,
"""
empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, \
memory_format=torch.contiguous_format) -> Tensor
Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
{out}
{dtype}
{layout}
{device}
{requires_grad}
{pin_memory}
{memory_format}
Example::
>>> torch.empty((2,3), dtype=torch.int64)
tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
[ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
""".format(
**factory_common_args
),
)
add_docstr(
torch.empty_like,
r"""
empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns an uninitialized tensor with the same size as :attr:`input`.
``torch.empty_like(input)`` is equivalent to
``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
Example::
>>> a = torch.empty((2, 3), dtype=torch.int32, device='cuda')
>>> torch.empty_like(a)
tensor([[0, 0, 0],
[0, 0, 0]], device='cuda:0', dtype=torch.int32)
""".format(
**factory_like_common_args
),
)
add_docstr(
torch.empty_strided,
r"""
empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data.
.. warning::
If the constructed tensor is "overlapped" (with multiple indices referring to the same element
in memory) its behavior is undefined.
Args:
size (tuple of int): the shape of the output tensor
stride (tuple of int): the strides of the output tensor
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{pin_memory}
Example::
>>> a = torch.empty_strided((2, 3), (1, 2))
>>> a
tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
[0.0000e+00, 0.0000e+00, 3.0705e-41]])
>>> a.stride()
(1, 2)
>>> a.size()
torch.Size([2, 3])
""".format(
**factory_common_args
),
)
add_docstr(
torch.full,
r"""
full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
tensor's dtype is inferred from :attr:`fill_value`.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
fill_value (Scalar): the value to fill the output tensor with.
Keyword args:
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.full((2, 3), 3.141592)
tensor([[ 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416]])
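>>> # The dtype is inferred from fill_value, so an integer fill produces an integer tensor
>>> torch.full((2, 2), 7)
tensor([[7, 7],
[7, 7]])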
""".format(
**factory_common_args
),
)
add_docstr(
torch.full_like,
"""
full_like(input, fill_value, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
``torch.full_like(input, fill_value)`` is equivalent to
``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
fill_value: the number to fill the output tensor with.
Keyword args:
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(
**factory_like_common_args
),
)
add_docstr(
torch.det,
r"""
det(input) -> Tensor
Alias for :func:`torch.linalg.det`
""",
)
add_docstr(
torch.where,
r"""
where(condition, x, y) -> Tensor
Return a tensor of elements selected from either :attr:`x` or :attr:`y`, depending on :attr:`condition`.
The operation is defined as:
.. math::
\text{out}_i = \begin{cases}
\text{x}_i & \text{if } \text{condition}_i \\
\text{y}_i & \text{otherwise} \\
\end{cases}
.. note::
The tensors :attr:`condition`, :attr:`x`, :attr:`y` must be :ref:`broadcastable <broadcasting-semantics>`.
Arguments:
condition (BoolTensor): When True (nonzero), yield x, otherwise yield y
x (Tensor or Scalar): value (if :attr:`x` is a scalar) or values selected at indices
where :attr:`condition` is ``True``
y (Tensor or Scalar): value (if :attr:`y` is a scalar) or values selected at indices
where :attr:`condition` is ``False``
Returns:
Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`x`, :attr:`y`
Example::
>>> x = torch.randn(3, 2)
>>> y = torch.ones(3, 2)
>>> x
tensor([[-0.4620, 0.3139],
[ 0.3898, -0.7197],
[ 0.0478, -0.1657]])
>>> torch.where(x > 0, x, y)
tensor([[ 1.0000, 0.3139],
[ 0.3898, 1.0000],
[ 0.0478, 1.0000]])
>>> x = torch.randn(2, 2, dtype=torch.double)
>>> x
tensor([[ 1.0779, 0.0383],
[-0.8785, -1.1089]], dtype=torch.float64)
>>> torch.where(x > 0, x, 0.)
tensor([[1.0779, 0.0383],
[0.0000, 0.0000]], dtype=torch.float64)
.. function:: where(condition) -> tuple of LongTensor
:noindex:
``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.
.. note::
See also :func:`torch.nonzero`.
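Example::
>>> # With a single argument, the indices of the True elements are returned as a tuple
>>> condition = torch.tensor([False, True, True])
>>> torch.where(condition)
(tensor([1, 2]),)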
""",
)
add_docstr(
torch.logdet,
r"""
logdet(input) -> Tensor
Calculates log determinant of a square matrix or batches of square matrices.
It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has
a negative determinant.
.. note::
Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
is not invertible. In this case, double backward through :meth:`logdet` will
be unstable when :attr:`input` doesn't have distinct singular values. See
:func:`torch.linalg.svd` for details.
.. seealso::
:func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the
absolute value of the determinant of real-valued (resp. complex) square matrices.
Arguments:
input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
batch dimensions.
Example::
>>> A = torch.randn(3, 3)
>>> torch.det(A)
tensor(0.2611)
>>> torch.logdet(A)
tensor(-1.3430)
>>> A = torch.randn(3, 2, 2)
>>> A
tensor([[[ 0.9254, -0.6213],
[-0.5787, 1.6843]],
[[ 0.3242, -0.9665],
[ 0.4539, -0.0887]],
[[ 1.1336, -0.4025],
[-0.7089, 0.9032]]])
>>> A.det()
tensor([1.1990, 0.4099, 0.7386])
>>> A.det().log()
tensor([ 0.1815, -0.8917, -0.3031])
""",
)
add_docstr(
torch.slogdet,
r"""
slogdet(input) -> (Tensor, Tensor)
Alias for :func:`torch.linalg.slogdet`
""",
)
add_docstr(
torch.pinverse,
r"""
pinverse(input, rcond=1e-15) -> Tensor
Alias for :func:`torch.linalg.pinv`
""",
)
add_docstr(
torch.hann_window,
"""
hann_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
+ r"""
Hann window function.
.. math::
w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
\sin^2 \left( \frac{\pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hann_window(L, periodic=True)`` equal to
``torch.hann_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
+ r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
Keyword args:
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
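Example::
>>> # The periodic window of length L equals the symmetric window of length L + 1 with its last sample dropped
>>> torch.allclose(torch.hann_window(8), torch.hann_window(9, periodic=False)[:-1])
True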
""".format(
**factory_common_args
),
)
add_docstr(
torch.hamming_window,
"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
+ r"""
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
"""
+ r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
Keyword args:
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
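Example::
>>> # With alpha = beta = 0.5 the Hamming window reduces to the Hann window
>>> torch.allclose(torch.hamming_window(10, alpha=0.5, beta=0.5), torch.hann_window(10))
True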
""".format(
**factory_common_args
),
)
add_docstr(
torch.bartlett_window,
"""
bartlett_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
+ r"""
Bartlett window function.
.. math::
w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
\frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
\end{cases},
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.bartlett_window(L, periodic=True)`` equal to
``torch.bartlett_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
+ r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
Keyword args:
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(
**factory_common_args
),
)
add_docstr(
torch.blackman_window,
"""
blackman_window(window_length, periodic=True, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
+ r"""
Blackman window function.
.. math::
w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.blackman_window(L, periodic=True)`` equal to
``torch.blackman_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
"""
+ r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
Keyword args:
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(
**factory_common_args
),
)
add_docstr(
torch.kaiser_window,
"""
kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
"""
+ r"""
Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
where ``L`` is the :attr:`window_length`. This function computes:
.. math::
out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
The :attr:`periodic` argument is intended as a helpful shorthand
to produce a periodic window as input to functions like :func:`torch.stft`.
.. note::
If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
"""
+ r"""
Args:
window_length (int): length of the window.
periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
If False, returns a symmetric window suitable for use in filter design.
beta (float, optional): shape parameter for the window.
Keyword args:
{dtype}
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
""".format(
**factory_common_args
),
)
add_docstr(
torch.vander,
"""
vander(x, N=None, increasing=False) -> Tensor
"""
+ r"""
Generates a Vandermonde matrix.
The columns of the output matrix are elementwise powers of the input vector :math:`x^{{(N-1)}}, x^{{(N-2)}}, ..., x^0`.
If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{{(N-1)}}`. Such a
matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde.
Arguments:
x (Tensor): 1-D input tensor.
N (int, optional): Number of columns in the output. If N is not specified,
a square array is returned :math:`(N = len(x))`.
increasing (bool, optional): Order of the powers of the columns. If True,
the powers increase from left to right, if False (the default) they are reversed.
Returns:
Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{{(N-1)}}`,
the second :math:`x^{{(N-2)}}` and so forth. If increasing is True, the columns
are :math:`x^0, x^1, ..., x^{{(N-1)}}`.
Example::
>>> x = torch.tensor([1, 2, 3, 5])
>>> torch.vander(x)
tensor([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> torch.vander(x, N=3)
tensor([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> torch.vander(x, N=3, increasing=True)
tensor([[ 1, 1, 1],
[ 1, 2, 4],
[ 1, 3, 9],
[ 1, 5, 25]])
""".format(
**factory_common_args
),
)
add_docstr(
torch.unbind,
r"""
unbind(input, dim=0) -> seq
Removes a tensor dimension.
Returns a tuple of all slices along a given dimension, already without it.
Arguments:
input (Tensor): the tensor to unbind
dim (int): dimension to remove
Example::
>>> torch.unbind(torch.tensor([[1, 2, 3],
...                            [4, 5, 6],
...                            [7, 8, 9]]))
(tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
""",
)
add_docstr(
torch.combinations,
r"""
combinations(input, r=2, with_replacement=False) -> seq
Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
python's `itertools.combinations` when `with_replacement` is set to `False`, and
`itertools.combinations_with_replacement` when `with_replacement` is set to `True`.
Arguments:
input (Tensor): 1D vector.
r (int, optional): number of elements to combine
with_replacement (bool, optional): whether to allow duplication in combination
Returns:
Tensor: A tensor equivalent to converting all the input tensors into lists, doing
`itertools.combinations` or `itertools.combinations_with_replacement` on these
lists, and finally converting the resulting list into a tensor.
Example::
>>> a = [1, 2, 3]
>>> list(itertools.combinations(a, r=2))
[(1, 2), (1, 3), (2, 3)]
>>> list(itertools.combinations(a, r=3))
[(1, 2, 3)]
>>> list(itertools.combinations_with_replacement(a, r=2))
[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
>>> tensor_a = torch.tensor(a)
>>> torch.combinations(tensor_a)
tensor([[1, 2],
[1, 3],
[2, 3]])
>>> torch.combinations(tensor_a, r=3)
tensor([[1, 2, 3]])
>>> torch.combinations(tensor_a, with_replacement=True)
tensor([[1, 1],
[1, 2],
[1, 3],
[2, 2],
[2, 3],
[3, 3]])
""",
)
add_docstr(
torch.trapezoid,
r"""
trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
:attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.
Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
the default computation is
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
\end{aligned}
When :attr:`dx` is specified the computation becomes
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
\end{aligned}
effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
assuming :attr:`x` is also a one-dimensional tensor with
elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes
.. math::
\begin{aligned}
\sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
\end{aligned}
When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
and :attr:`y`, the function computes the difference between consecutive elements along
dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1.
After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule.
See the examples below for details.
.. note::
The trapezoidal rule is a technique for approximating the definite integral of a function
by averaging its left and right Riemann sums. The approximation becomes more accurate as
the resolution of the partition increases.
Arguments:
y (Tensor): Values to use when computing the trapezoidal rule.
x (Tensor): If specified, defines spacing between values as specified above.
Keyword arguments:
dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
is specified then this defaults to 1. Effectively multiplies the result by its value.
dim (int): The dimension along which to compute the trapezoidal rule.
The last (inner-most) dimension by default.
Examples::
>>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
>>> y = torch.tensor([1, 5, 10])
>>> torch.trapezoid(y)
tensor(10.5)
>>> # Computes the same trapezoidal rule directly to verify
>>> (1 + 10 + 10) / 2
10.5
>>> # Computes the trapezoidal rule in 1D with constant spacing of 2
>>> # NOTE: the result is the same as before, but multiplied by 2
>>> torch.trapezoid(y, dx=2)
tensor(21.0)
>>> # Computes the trapezoidal rule in 1D with arbitrary spacing
>>> x = torch.tensor([1, 3, 6])
>>> torch.trapezoid(y, x)
tensor(28.5)
>>> # Computes the same trapezoidal rule directly to verify
>>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
28.5
>>> # Computes the trapezoidal rule for each row of a 3x3 matrix
>>> y = torch.arange(9).reshape(3, 3)
tensor([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> torch.trapezoid(y)
tensor([ 2., 8., 14.])
>>> # Computes the trapezoidal rule for each column of the matrix
>>> torch.trapezoid(y, dim=0)
tensor([ 6., 8., 10.])
>>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with the same arbitrary spacing
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([1, 3, 6])
>>> torch.trapezoid(y, x)
tensor([5., 5., 5.])
>>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with different arbitrary spacing per row
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
>>> torch.trapezoid(y, x)
tensor([2., 4., 6.])
""",
)
add_docstr(
torch.trapz,
r"""
trapz(y, x, *, dim=-1) -> Tensor
Alias for :func:`torch.trapezoid`.
""",
)
add_docstr(
torch.cumulative_trapezoid,
r"""
cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
used to specify arbitrary spacing along :attr:`dim`.
For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
and this function is that :func:`torch.trapezoid` returns a value for each integration,
whereas this function returns a cumulative value for every spacing within the integration. This
is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.
Arguments:
y (Tensor): Values to use when computing the trapezoidal rule.
x (Tensor): If specified, defines spacing between values as specified above.
Keyword arguments:
dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
is specified then this defaults to 1. Effectively multiplies the result by its value.
dim (int): The dimension along which to compute the trapezoidal rule.
The last (inner-most) dimension by default.
Examples::
>>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
>>> y = torch.tensor([1, 5, 10])
>>> torch.cumulative_trapezoid(y)
tensor([3., 10.5])
>>> # Computes the same trapezoidal rule directly up to each element to verify
>>> (1 + 5) / 2
3.0
>>> (1 + 10 + 10) / 2
10.5
>>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
>>> # NOTE: the result is the same as before, but multiplied by 2
>>> torch.cumulative_trapezoid(y, dx=2)
tensor([6., 21.])
>>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
>>> x = torch.tensor([1, 3, 6])
>>> torch.cumulative_trapezoid(y, x)
tensor([6., 28.5])
>>> # Computes the same trapezoidal rule directly up to each element to verify
>>> ((3 - 1) * (1 + 5)) / 2
6.0
>>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
28.5
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
>>> y = torch.arange(9).reshape(3, 3)
tensor([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> torch.cumulative_trapezoid(y)
tensor([[ 0.5, 2.],
[ 3.5, 8.],
[ 6.5, 14.]])
>>> # Cumulatively computes the trapezoidal rule for each column of the matrix
>>> torch.cumulative_trapezoid(y, dim=0)
tensor([[ 1.5, 2.5, 3.5],
[ 6.0, 8.0, 10.0]])
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with the same arbitrary spacing
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([1, 3, 6])
>>> torch.cumulative_trapezoid(y, x)
tensor([[2., 5.],
[2., 5.],
[2., 5.]])
>>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
>>> # with different arbitrary spacing per row
>>> y = torch.ones(3, 3)
>>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
>>> torch.cumulative_trapezoid(y, x)
tensor([[1., 2.],
[2., 4.],
[3., 6.]])
""",
)
add_docstr(
torch.repeat_interleave,
r"""
repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
Repeat elements of a tensor.
.. warning::
This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
Args:
{input}
repeats (Tensor or int): The number of repetitions for each element.
repeats is broadcasted to fit the shape of the given axis.
dim (int, optional): The dimension along which to repeat values.
By default, use the flattened input array, and return a flat output
array.
Keyword args:
output_size (int, optional): Total output size for the given axis
(e.g. sum of repeats). If given, it will avoid stream synchronization
needed to calculate the output shape of the tensor.
Returns:
Tensor: Repeated tensor which has the same shape as input, except along the given axis.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.repeat_interleave(2)
tensor([1, 1, 2, 2, 3, 3])
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.repeat_interleave(y, 2)
tensor([1, 1, 2, 2, 3, 3, 4, 4])
>>> torch.repeat_interleave(y, 3, dim=1)
tensor([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
tensor([[1, 2],
[3, 4],
[3, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
tensor([[1, 2],
[3, 4],
[3, 4]])
.. function:: repeat_interleave(repeats, *, output_size=None) -> Tensor
:noindex:
If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
`1` appears `n2` times, `2` appears `n3` times, etc.
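Example::
>>> # repeats = [1, 2, 3]: index 0 appears once, 1 twice, 2 three times
>>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
tensor([0, 1, 1, 2, 2, 2])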
""".format(
**common_args
),
)
add_docstr(
torch.tile,
r"""
tile(input, dims) -> Tensor
Constructs a tensor by repeating the elements of :attr:`input`.
The :attr:`dims` argument specifies the number of repetitions
in each dimension.
If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then
ones are prepended to :attr:`dims` until all dimensions are specified.
For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims`
is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2).
Analogously, if :attr:`input` has fewer dimensions than :attr:`dims`
specifies, then :attr:`input` is treated as if it were unsqueezed at
dimension zero until it has as many dimensions as :attr:`dims` specifies.
For example, if :attr:`input` has shape (4, 2) and :attr:`dims`
is (3, 3, 2, 2), then :attr:`input` is treated as if it had the
shape (1, 1, 4, 2).
.. note::
This function is similar to NumPy's tile function.
Args:
input (Tensor): the tensor whose elements to repeat.
dims (tuple): the number of repetitions per dimension.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.tile((2,))
tensor([1, 2, 3, 1, 2, 3])
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.tile(y, (2, 2))
tensor([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
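>>> # When dims has more entries than input.dim(), input is treated as if it were
>>> # unsqueezed at dimension zero (here: shape (2,) -> (1, 2))
>>> z = torch.tensor([1, 2])
>>> torch.tile(z, (2, 2))
tensor([[1, 2, 1, 2],
[1, 2, 1, 2]])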
""",
)
add_docstr(
torch.quantize_per_tensor,
r"""
quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
Converts a float tensor to a quantized tensor with given scale and zero point.
Arguments:
input (Tensor): float tensor or list of tensors to quantize
scale (float or Tensor): scale to apply in quantization formula
zero_point (int or Tensor): offset in integer value that maps to float zero
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
Returns:
Tensor: A newly quantized tensor or list of quantized tensors.
Example::
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
tensor([ 0, 10, 20, 30], dtype=torch.uint8)
>>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
...                           torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
(tensor([-1., 0.], size=(2,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
tensor([-2., 2.], size=(2,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
""",
)
add_docstr(
torch.quantize_per_tensor_dynamic,
r"""
quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor
Converts a float tensor to a quantized tensor with scale and zero_point calculated
dynamically based on the input.
Arguments:
input (Tensor): float tensor or list of tensors to quantize
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``
reduce_range (bool): a flag to indicate whether to reduce the range of quantized
data by 1 bit; this is required to avoid instruction overflow on some hardware
Returns:
Tensor: A newly (dynamically) quantized tensor
Example::
>>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False)
>>> print(t)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941,
zero_point=85)
>>> t.int_repr()
tensor([ 0, 85, 170, 255], dtype=torch.uint8)
""",
)
add_docstr(
torch.quantize_per_channel,
r"""
quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor
Converts a float tensor to a per-channel quantized tensor with given scales and zero points.
Arguments:
input (Tensor): float tensor to quantize
scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
zero_points (Tensor): integer 1D tensor of offsets to use, size should match ``input.size(axis)``
axis (int): dimension on which apply per-channel quantization
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
Returns:
Tensor: A newly quantized tensor
Example::
>>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
>>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
tensor([[-1., 0.],
[ 1., 2.]], size=(2, 2), dtype=torch.quint8,
quantization_scheme=torch.per_channel_affine,
scale=tensor([0.1000, 0.0100], dtype=torch.float64),
zero_point=tensor([10, 0]), axis=0)
>>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
tensor([[ 0, 10],
[100, 200]], dtype=torch.uint8)
""",
)
add_docstr(
torch.quantized_batch_norm,
r"""
quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor
Applies batch normalization on a 4D (NCHW) quantized tensor.
.. math::
y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
Arguments:
input (Tensor): quantized tensor
weight (Tensor): float tensor that corresponds to the gamma, size C
bias (Tensor): float tensor that corresponds to the beta, size C
mean (Tensor): float mean value in batch normalization, size C
var (Tensor): float tensor for variance, size C
eps (float): a value added to the denominator for numerical stability.
output_scale (float): output quantized tensor scale
output_zero_point (int): output quantized tensor zero_point
Returns:
Tensor: A quantized tensor with batch normalization applied.
Example::
>>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
>>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
tensor([[[[-0.2000, -0.2000],
[ 1.6000, -0.2000]],
[[-0.4000, -0.4000],
[-0.4000, 0.6000]]],
[[[-0.2000, -0.2000],
[-0.2000, -0.2000]],
[[ 0.6000, -0.4000],
[ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
""",
)
add_docstr(
torch.quantized_max_pool1d,
r"""
quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
Applies a 1D max pooling over an input quantized tensor composed of several input planes.
Arguments:
input (Tensor): quantized tensor
kernel_size (list of int): the size of the sliding window
stride (``list of int``, optional): the stride of the sliding window
padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
Defaults to False.
Returns:
Tensor: A quantized tensor with max_pool1d applied.
Example::
>>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
>>> torch.quantized_max_pool1d(qx, [2])
tensor([[0.0000],
[1.5000]], size=(2, 1), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
""",
)
add_docstr(
torch.quantized_max_pool2d,
r"""
quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
Applies a 2D max pooling over an input quantized tensor composed of several input planes.
Arguments:
input (Tensor): quantized tensor
kernel_size (``list of int``): the size of the sliding window
stride (``list of int``, optional): the stride of the sliding window
padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
Defaults to False.
Returns:
Tensor: A quantized tensor with max_pool2d applied.
Example::
>>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
>>> torch.quantized_max_pool2d(qx, [2,2])
tensor([[[[1.5000]],
[[1.5000]]],
[[[0.0000]],
[[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
""",
)
add_docstr(
torch.Generator,
r"""
Generator(device='cpu') -> Generator
Creates and returns a generator object that manages the state of the algorithm which
produces pseudo random numbers. Used as a keyword argument in many :ref:`inplace-random-sampling`
functions.
Arguments:
device (:class:`torch.device`, optional): the desired device for the generator.
Returns:
Generator: A torch.Generator object.
Example::
>>> g_cpu = torch.Generator()
>>> g_cuda = torch.Generator(device='cuda')
""",
)
add_docstr(
torch.Generator.set_state,
r"""
Generator.set_state(new_state) -> void
Sets the Generator state.
Arguments:
new_state (torch.ByteTensor): The desired state.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu_other = torch.Generator()
>>> g_cpu.set_state(g_cpu_other.get_state())
""",
)
add_docstr(
torch.Generator.get_state,
r"""
Generator.get_state() -> Tensor
Returns the Generator state as a ``torch.ByteTensor``.
Returns:
Tensor: A ``torch.ByteTensor`` which contains all the necessary bits
to restore a Generator to a specific point in time.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.get_state()
""",
)
add_docstr(
torch.Generator.manual_seed,
r"""
Generator.manual_seed(seed) -> Generator
Sets the seed for generating random numbers. Returns a `torch.Generator` object.
It is recommended to set a large seed, i.e. a number that has a good balance of 0
and 1 bits. Avoid having many 0 bits in the seed.
Arguments:
seed (int): The desired seed. Value must be within the inclusive range
`[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
is raised. Negative inputs are remapped to positive values with the formula
`0xffff_ffff_ffff_ffff + seed`.
Returns:
Generator: A torch.Generator object.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.manual_seed(2147483647)
""",
)
add_docstr(
torch.Generator.initial_seed,
r"""
Generator.initial_seed() -> int
Returns the initial seed for generating random numbers.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.initial_seed()
2147483647
""",
)
add_docstr(
torch.Generator.seed,
r"""
Generator.seed() -> int
Gets a non-deterministic random number from std::random_device or the current
time and uses it to seed a Generator.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.seed()
1516516984916
""",
)
add_docstr(
torch.Generator.device,
r"""
Generator.device -> device
Gets the current device of the generator.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.device
device(type='cpu')
""",
)
add_docstr(
torch._assert_async,
r"""
_assert_async(tensor) -> void
Asynchronously assert that the contents of tensor are nonzero. For CPU tensors,
this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
CUDA tensors, we DO NOT synchronize and you may only find out the assertion
failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for
testing invariants in CUDA tensors without giving up performance. This function
is NOT intended to be used for regular error checking, as it will trash your CUDA
context if the assert fails (forcing you to restart your PyTorch process.)
Args:
tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero
elements (including False for boolean tensors) cause an assertion failure
to be raised.
""",
)
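# Hedged usage sketch (illustrative; this helper is not part of the generated
# docs). It shows how torch._assert_async behaves on a CPU tensor, where the
# check is effectively synchronous.
def _assert_async_example():
    import torch
    torch._assert_async(torch.tensor(1))  # passes: the single element is nonzero
    # torch._assert_async(torch.tensor(0)) would raise immediately on CPU;
    # for CUDA tensors the failure may only surface at a later kernel launch.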
add_docstr(
torch.searchsorted,
r"""
searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side='left', out=None, sorter=None) -> Tensor
Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
Return a new tensor with the same size as :attr:`values`. If :attr:`right` is False or :attr:`side` is
'left' (the default), then the left boundary of :attr:`sorted_sequence` is closed. More formally,
the returned index satisfies the following rules:
.. list-table::
:widths: 12 10 78
:header-rows: 1
* - :attr:`sorted_sequence`
- :attr:`right`
- *returned index satisfies*
* - 1-D
- False
- ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
* - 1-D
- True
- ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
* - N-D
- False
- ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
* - N-D
- True
- ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
Args:
sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
dimension unless :attr:`sorter` is provided, in which case the sequence does not
need to be sorted
values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
Keyword args:
out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
Default value is False, i.e. default output data type is torch.int64.
right (bool, optional): if False, return the first suitable location that is found. If True, return the
last such index. If no suitable index is found, return 0 for non-numerical values
(e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
(one past the last index of the *innermost* dimension). In other words, if False,
gets the lower bound index for each value in :attr:`values` on the corresponding
*innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
bound index instead. Default value is False. :attr:`side` does the same and is
preferred. It will error if :attr:`side` is set to "left" while this is True.
side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
and "right" corresponds to True for :attr:`right`. It will error if this is set to
"left" while :attr:`right` is True.
out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
:attr:`sorted_sequence` containing a sequence of indices that sort it in the
ascending order on the innermost dimension
Example::
>>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
>>> sorted_sequence
tensor([[ 1, 3, 5, 7, 9],
[ 2, 4, 6, 8, 10]])
>>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
>>> values
tensor([[3, 6, 9],
[3, 6, 9]])
>>> torch.searchsorted(sorted_sequence, values)
tensor([[1, 3, 4],
[1, 2, 4]])
>>> torch.searchsorted(sorted_sequence, values, side='right')
tensor([[2, 3, 5],
[1, 3, 4]])
>>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
>>> sorted_sequence_1d
tensor([1, 3, 5, 7, 9])
>>> torch.searchsorted(sorted_sequence_1d, values)
tensor([[1, 3, 4],
[1, 3, 4]])
""",
)
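# Hedged sketch (not part of the original docstring) showing the `sorter`
# argument, which lets searchsorted work on an unsorted sequence by supplying
# the indices that would sort it.
def _searchsorted_sorter_example():
    import torch
    unsorted = torch.tensor([5, 1, 3])
    sorter = torch.argsort(unsorted)  # tensor([1, 2, 0]); sorted order is [1, 3, 5]
    # Lower-bound indices into the *sorted* order: 2 -> 1, 4 -> 2
    return torch.searchsorted(unsorted, torch.tensor([2, 4]), sorter=sorter)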
add_docstr(
torch.bucketize,
r"""
bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor
Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size
as :attr:`input`. If :attr:`right` is False (default), then the left boundary is closed. More
formally, the returned index satisfies the following rules:
.. list-table::
:widths: 15 85
:header-rows: 1
* - :attr:`right`
- *returned index satisfies*
* - False
- ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
* - True
- ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``
Args:
input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
boundaries (Tensor): 1-D tensor, must contain a monotonically increasing sequence.
Keyword args:
out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
Default value is False, i.e. default output data type is torch.int64.
right (bool, optional): if False, return the first suitable location that is found. If True, return the
last such index. If no suitable index is found, return 0 for non-numerical values
(e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
In other words, if False, gets the lower bound index for each value in :attr:`input`
from :attr:`boundaries`. If True, gets the upper bound index instead.
Default value is False.
out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.
Example::
>>> boundaries = torch.tensor([1, 3, 5, 7, 9])
>>> boundaries
tensor([1, 3, 5, 7, 9])
>>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
>>> v
tensor([[3, 6, 9],
[3, 6, 9]])
>>> torch.bucketize(v, boundaries)
tensor([[1, 3, 4],
[1, 3, 4]])
>>> torch.bucketize(v, boundaries, right=True)
tensor([[2, 3, 5],
[2, 3, 5]])
""",
)
add_docstr(
torch.view_as_real_copy,
r"""
Performs the same operation as :func:`torch.view_as_real`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.view_as_complex_copy,
r"""
Performs the same operation as :func:`torch.view_as_complex`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.as_strided_copy,
r"""
Performs the same operation as :func:`torch.as_strided`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.diagonal_copy,
r"""
Performs the same operation as :func:`torch.diagonal`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.expand_copy,
r"""
Performs the same operation as :func:`torch.expand`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.permute_copy,
r"""
Performs the same operation as :func:`torch.permute`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.select_copy,
r"""
Performs the same operation as :func:`torch.select`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.detach_copy,
r"""
Performs the same operation as :func:`torch.detach`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.slice_copy,
r"""
Performs the same operation as :func:`torch.slice`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.split_copy,
r"""
Performs the same operation as :func:`torch.split`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.split_with_sizes_copy,
r"""
Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.squeeze_copy,
r"""
Performs the same operation as :func:`torch.squeeze`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.t_copy,
r"""
Performs the same operation as :func:`torch.t`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.transpose_copy,
r"""
Performs the same operation as :func:`torch.transpose`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.unsqueeze_copy,
r"""
Performs the same operation as :func:`torch.unsqueeze`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.indices_copy,
r"""
Performs the same operation as :func:`torch.indices`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.values_copy,
r"""
Performs the same operation as :func:`torch.values`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.crow_indices_copy,
r"""
Performs the same operation as :func:`torch.crow_indices`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.col_indices_copy,
r"""
Performs the same operation as :func:`torch.col_indices`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.unbind_copy,
r"""
Performs the same operation as :func:`torch.unbind`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.view_copy,
r"""
Performs the same operation as :func:`torch.view`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.unfold_copy,
r"""
Performs the same operation as :func:`torch.unfold`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
add_docstr(
torch.alias_copy,
r"""
Performs the same operation as :func:`torch.alias`, but all output tensors
are freshly created instead of aliasing the input.
""",
)
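# Hedged sketch (illustrative only) contrasting a view op with its *_copy
# variant: torch.transpose returns a view that aliases the input, while
# torch.transpose_copy materializes a fresh tensor.
def _copy_variant_example():
    import torch
    t = torch.zeros(2, 3)
    view = torch.transpose(t, 0, 1)       # aliases t
    copy = torch.transpose_copy(t, 0, 1)  # freshly allocated
    t[0, 0] = 1.0
    return view[0, 0].item(), copy[0, 0].item()  # (1.0, 0.0)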
|
pytorch-master
|
torch/_torch_docs.py
|
import contextlib
import ctypes
import sys
import types
import torch._C
import torch.jit
from torch import _utils_internal
# Query `hasattr` only once.
_SET_GLOBAL_FLAGS = hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags")
@contextlib.contextmanager
def dl_open_guard():
"""
Context manager to set the RTLD_GLOBAL dynamic linker flag while we open a
shared library to load custom operators.
"""
if _SET_GLOBAL_FLAGS:
old_flags = sys.getdlopenflags()
sys.setdlopenflags(old_flags | ctypes.RTLD_GLOBAL)
yield
if _SET_GLOBAL_FLAGS:
sys.setdlopenflags(old_flags)
# Each OpOverload object contains a pointer to a specific operator overload and a pointer to the parent `OpOverloadPacket` object.
# You can obtain an OpOverload object through attribute query on OpOverloadPacket.
class OpOverload:
def __init__(self, overloadpacket, op, op_dk, schema, tags):
self._op = op
self._op_dk = op_dk
self._schema = schema
self._overloadpacket = overloadpacket
self._tags = tags
self._overloadname = (
"default" if schema.overload_name == "" else schema.overload_name
)
self.name = self._schema.name
if schema.overload_name:
self.name += "." + schema.overload_name
self.__name__ = "{}.{}".format(
self._schema.name.split("::")[1], self._overloadname
)
self.__module__ = overloadpacket.__module__
op.__module__ = overloadpacket.__module__
# deepcopy is a no-op since the OpOverload object is immutable and must be unique for a given op overload.
def __deepcopy__(self, memo=None):
return self
def __repr__(self):
return "<OpOverload(op='{}.{}', overload='{}')>".format(
*self._schema.name.split("::"), self._overloadname
)
def __call__(self, *args, **kwargs):
return self._op(*args, **kwargs or {})
def __getattr__(self, key):
return getattr(self._op, key)
def __hash__(self):
return hash(self._op)
# `my_namespace.my_op_name.overload_name`
def __str__(self):
return "{}.{}.{}".format(*self._schema.name.split("::"), self._overloadname)
def decompose(self, *args, **kwargs):
dk = "CompositeImplicitAutograd"
if torch._C._dispatch_has_kernel_for_dispatch_key(self.name, dk):
return self._op_dk(dk, *args, **kwargs)
else:
return NotImplemented
@property
def overloadpacket(self):
return self._overloadpacket
@property
def op(self):
return self._op
@property
def tags(self):
return self._tags
# TODO: add more methods to expose information about input and output arguments
# The OpOverloadPacket class contains a pointer to a base unresolved operator that doesn't correspond to a specific operator.
# You can obtain an OpOverload object through attribute query.
class OpOverloadPacket:
def __init__(self, qualified_op_name, op_name, op, overload_names):
# These attributes are accessible on the object through the properties
# defined below but are immutable
self._qualified_op_name = qualified_op_name
self.__name__ = op_name
self._op = op
self._overload_names = overload_names
# deepcopy is a no-op since the OpOverloadPacket object is immutable and must be unique for a given op.
def __deepcopy__(self, memo=None):
return self
def __repr__(self):
return "<OpOverloadPacket(op='{}.{}')>".format(
*self._qualified_op_name.split("::")
)
def __hash__(self):
return hash(self._op)
def __str__(self):
return "{}.{}".format(*self._qualified_op_name.split("::"))
@property
def op(self):
return self._op
def __getattr__(self, key):
# It is not a valid op_name when __file__ is passed in
if key == "__file__":
return "torch.ops"
# Ensure that queries for dunder attributes that do not exist on the
# OpOverloadPacket but do exist on the self._op object do not unnecessarily call
# `_get_operation_overload` (which is an expensive operation).
# This is done to prevent any potential slowdown. This can be extended
# if there are other attributes like `__name__` that only exist on self._op and not on the
# OpOverloadPacket.
# This is ok since we are guaranteed that an overload name for an aten op can't start with '__'.
try:
if key.startswith("__"):
return getattr(self._op, key)
except AttributeError:
# for consistency because it seems weird to
# throw an attribute error with a message containing
# an object name different from the one the attribute
# query was performed on.
raise AttributeError(
"'{}' can't have an overload name beginning with '__' and the "
"underlying op {} has no attribute {} either.".format(
str(self), str(self._op), key
)
) from None
try:
# This is ok since we are guaranteed that an overload name for an aten op can't be 'default'
use_key = "" if key == "default" else key
# TODO: disallow access to overloads registered by JIT
op_, op_dk_, tags = torch._C._get_operation_overload(
self._qualified_op_name, use_key
)
schema = torch._C._get_schema(self._qualified_op_name, use_key)
overload = OpOverload(self, op_, op_dk_, schema, tags)
# cache the overload object
setattr(self, key, overload)
return overload
except RuntimeError:
raise AttributeError(
"The underlying op of '{}' has no overload name '{}'".format(
str(self), key
)
) from None
def __call__(self, *args, **kwargs):
# overloading __call__ to ensure torch.ops.foo.bar()
# is still callable from JIT
# We save the function ptr as the `op` attribute on
# OpOverloadPacket to access it here.
return self._op(*args, **kwargs or {})
# TODO: use this to make a __dir__
def overloads(self):
return [n if n else "default" for n in self._overload_names]
# Resolution of torch.fn is different from torch.ops.aten.fn
# torch.fn uses the Python argparser, matches with the
# appropriate schema, and calls into the unboxed version of the method
# torch.ops.aten.fn resolution is done via the mechanism defined in JIT.
# JIT creates a stack of all the overloads and then tries to match the
# correct one at runtime and always calls into the boxed version of the method
# Autograd codegen creates VariableType, TracerType,
# inplace or view type and python bindings.
# Aten codegen generates tensor methods for the tensor class.
# _OpNamespace is a subclass of ModuleType because the torch script
# allows attribute lookups on modules only. Since we want torch.ops.foo.bar()
# to work from script, we need to ensure ops and foo are modules
class _OpNamespace(types.ModuleType):
"""
An op namespace to dynamically bind Operators into Python.
Say a user has created a custom Operator called "my_namespace::my_op". To
call this op, the user will write torch.ops.my_namespace.my_op(...).
At startup, this operation will not yet be bound into Python. Instead, the
following sequence of magic tricks will occur:
1. `torch.ops.my_namespace` will invoke the `__getattr__` magic method
on the `torch.ops` object, which will create a new `_OpNamespace`
object called `my_namespace` and set it as an attribute on the `ops`
object.
2. `torch.ops.my_namespace.my_op` will then invoke `__getattr__` on
the `my_namespace` object, which will retrieve the operation via
`torch.get_operation`, a function bound from C++, and then in a similar
fashion bind this new object onto the `my_namespace` object.
3. `torch.ops.my_namespace.my_op(...)` then calls this new operation
and subsequent accesses will incur no further lookup (the namespace and
operation will already exist).
"""
def __init__(self, name):
super(_OpNamespace, self).__init__("torch.ops." + name)
self.name = name
def __getattr__(self, op_name):
# It is not a valid op_name when __file__ is passed in
if op_name == "__file__":
return "torch.ops"
# Get the op `my_namespace::my_op` if available. This will also check
# for overloads and raise an exception if there is more than one.
namespace_name = self.name
qualified_op_name = "{}::{}".format(namespace_name, op_name)
try:
op, overload_names = torch._C._jit_get_operation(qualified_op_name)
except RuntimeError as e:
# Turn this into AttributeError so getattr(obj, key, default)
# works (this is called by TorchScript with __origin__)
raise AttributeError(
f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
) from e
# let the script frontend know that op is identical to the builtin op
# with qualified_op_name
torch.jit._builtins._register_builtin(op, qualified_op_name)
op.__module__ = self.__module__ + "." + namespace_name
opoverloadpacket = OpOverloadPacket(
qualified_op_name, op_name, op, overload_names
)
opoverloadpacket.__module__ = self.__module__ + "." + namespace_name
# cache the opoverloadpacket to ensure that each op corresponds to
# a unique OpOverloadPacket object
setattr(self, op_name, opoverloadpacket)
return opoverloadpacket
class _Ops(types.ModuleType):
__file__ = "_ops.py"
def __init__(self):
super(_Ops, self).__init__("torch.ops")
self.loaded_libraries = set()
def __getattr__(self, name):
# Here we are creating `torch.ops.my_namespace`
namespace = _OpNamespace(name)
setattr(self, name, namespace)
return namespace
def load_library(self, path):
"""
Loads a shared library from the given path into the current process.
The library being loaded may run global initialization code to register
custom operators with the PyTorch JIT runtime. This allows dynamically
loading custom operators. For this, you should compile your operator
and the static registration code into a shared library object, and then
call ``torch.ops.load_library('path/to/libcustom.so')`` to load the
shared object.
After the library is loaded, it is added to the
``torch.ops.loaded_libraries`` attribute, a set that may be inspected
for the paths of all libraries loaded using this function.
Args:
path (str): A path to a shared library to load.
"""
if sys.executable == "torch_deploy":
return
path = _utils_internal.resolve_library_path(path)
with dl_open_guard():
# Import the shared library into the process, thus running its
# static (global) initialization code in order to register custom
# operators with the JIT.
ctypes.CDLL(path)
self.loaded_libraries.add(path)
# The ops "namespace"
ops = _Ops()
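# Hedged usage sketch (illustrative; assumes an installed torch with the aten
# namespace registered). Attribute access walks torch.ops -> _OpNamespace ->
# OpOverloadPacket -> OpOverload, caching each object along the way.
def _dynamic_op_lookup_example():
    import torch
    packet = torch.ops.aten.add             # OpOverloadPacket for aten::add
    overload = torch.ops.aten.add.Tensor    # OpOverload for the "Tensor" overload
    out_packet = packet(torch.ones(2), torch.ones(2))      # tensor([2., 2.])
    out_overload = overload(torch.ones(2), torch.ones(2))  # same result via the resolved overload
    names = packet.overloads()              # e.g. ['Tensor', 'Scalar', 'out', ...]
    return out_packet, out_overload, names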
|
pytorch-master
|
torch/_ops.py
|
import os
import sys
import tempfile
# this arbitrary-looking assortment of functionality is provided here
# to have a central place for overrideable behavior. The motivating
# use is the FB build environment, where this source file is replaced
# by an equivalent.
if sys.executable == "torch_deploy":
# __file__ is meaningless in the context of frozen torch used in torch deploy.
# setting an empty torch_parent should allow the functions below to operate without crashing,
# but it's unclear if there is a valid use case for them in the context of deploy.
torch_parent = ""
else:
if os.path.basename(os.path.dirname(__file__)) == "shared":
torch_parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
else:
torch_parent = os.path.dirname(os.path.dirname(__file__))
def get_file_path(*path_components: str) -> str:
return os.path.join(torch_parent, *path_components)
def get_file_path_2(*path_components: str) -> str:
return os.path.join(*path_components)
def get_writable_path(path: str) -> str:
if os.access(path, os.W_OK):
return path
return tempfile.mkdtemp(suffix=os.path.basename(path))
def prepare_multiprocessing_environment(path: str) -> None:
pass
def resolve_library_path(path: str) -> str:
return os.path.realpath(path)
TEST_MASTER_ADDR = "127.0.0.1"
TEST_MASTER_PORT = 29500
# USE_GLOBAL_DEPS controls whether __init__.py tries to load
# libtorch_global_deps, see Note [Global dependencies]
USE_GLOBAL_DEPS = True
# USE_RTLD_GLOBAL_WITH_LIBTORCH controls whether __init__.py tries to load
# _C.so with RTLD_GLOBAL during the call to dlopen.
USE_RTLD_GLOBAL_WITH_LIBTORCH = False
|
pytorch-master
|
torch/_utils_internal.py
|
import torch
from typing import Optional
class SobolEngine(object):
r"""
The :class:`torch.quasirandom.SobolEngine` is an engine for generating
(scrambled) Sobol sequences. Sobol sequences are an example of low
discrepancy quasi-random sequences.
This implementation of an engine for Sobol sequences is capable of
sampling sequences up to a maximum dimension of 21201. It uses direction
numbers from https://web.maths.unsw.edu.au/~fkuo/sobol/ obtained using the
search criterion D(6) up to the dimension 21201. This is the recommended
choice by the authors.
References:
- Art B. Owen. Scrambling Sobol and Niederreiter-Xing points.
Journal of Complexity, 14(4):466-489, December 1998.
- I. M. Sobol. The distribution of points in a cube and the accurate
evaluation of integrals.
Zh. Vychisl. Mat. i Mat. Phys., 7:784-802, 1967.
Args:
dimension (Int): The dimensionality of the sequence to be drawn
scramble (bool, optional): Setting this to ``True`` will produce
scrambled Sobol sequences. Scrambling is
capable of producing better Sobol
sequences. Default: ``False``.
seed (Int, optional): This is the seed for the scrambling. The seed
of the random number generator is set to this,
if specified. Otherwise, it uses a random seed.
Default: ``None``
Examples::
>>> # xdoctest: +SKIP("unseeded random state")
>>> soboleng = torch.quasirandom.SobolEngine(dimension=5)
>>> soboleng.draw(3)
tensor([[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.5000, 0.5000, 0.5000, 0.5000, 0.5000],
[0.7500, 0.2500, 0.2500, 0.2500, 0.7500]])
"""
MAXBIT = 30
MAXDIM = 21201
def __init__(self, dimension, scramble=False, seed=None):
if dimension > self.MAXDIM or dimension < 1:
raise ValueError("Supported range of dimensionality "
f"for SobolEngine is [1, {self.MAXDIM}]")
self.seed = seed
self.scramble = scramble
self.dimension = dimension
cpu = torch.device("cpu")
self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long)
torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension)
if not self.scramble:
self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long)
else:
self._scramble()
self.quasi = self.shift.clone(memory_format=torch.contiguous_format)
self._first_point = (self.quasi / 2 ** self.MAXBIT).reshape(1, -1)
self.num_generated = 0
def draw(self, n: int = 1, out: Optional[torch.Tensor] = None,
dtype: torch.dtype = torch.float32) -> torch.Tensor:
r"""
Function to draw a sequence of :attr:`n` points from a Sobol sequence.
Note that the samples are dependent on the previous samples. The size
of the result is :math:`(n, dimension)`.
Args:
n (Int, optional): The length of sequence of points to draw.
Default: 1
out (Tensor, optional): The output tensor
dtype (:class:`torch.dtype`, optional): the desired data type of the
returned tensor.
Default: ``torch.float32``
"""
if self.num_generated == 0:
if n == 1:
result = self._first_point.to(dtype)
else:
result, self.quasi = torch._sobol_engine_draw(
self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated, dtype=dtype,
)
result = torch.cat((self._first_point, result), dim=-2)
else:
result, self.quasi = torch._sobol_engine_draw(
self.quasi, n, self.sobolstate, self.dimension, self.num_generated - 1, dtype=dtype,
)
self.num_generated += n
if out is not None:
out.resize_as_(result).copy_(result)
return out
return result
def draw_base2(self, m: int, out: Optional[torch.Tensor] = None,
dtype: torch.dtype = torch.float32) -> torch.Tensor:
r"""
Function to draw a sequence of :attr:`2**m` points from a Sobol sequence.
Note that the samples are dependent on the previous samples. The size
of the result is :math:`(2**m, dimension)`.
Args:
m (Int): The (base2) exponent of the number of points to draw.
out (Tensor, optional): The output tensor
dtype (:class:`torch.dtype`, optional): the desired data type of the
returned tensor.
Default: ``torch.float32``
"""
n = 2 ** m
total_n = self.num_generated + n
if not (total_n & (total_n - 1) == 0):
raise ValueError("The balance properties of Sobol' points require "
"n to be a power of 2. {0} points have been "
"previously generated, then: n={0}+2**{1}={2}. "
"If you still want to do this, please use "
"'SobolEngine.draw()' instead."
.format(self.num_generated, m, total_n))
return self.draw(n=n, out=out, dtype=dtype)
def reset(self):
r"""
Function to reset the ``SobolEngine`` to base state.
"""
self.quasi.copy_(self.shift)
self.num_generated = 0
return self
def fast_forward(self, n):
r"""
Function to fast-forward the state of the ``SobolEngine`` by
:attr:`n` steps. This is equivalent to drawing :attr:`n` samples
without using the samples.
Args:
n (Int): The number of steps to fast-forward by.
"""
if self.num_generated == 0:
torch._sobol_engine_ff_(self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated)
else:
torch._sobol_engine_ff_(self.quasi, n, self.sobolstate, self.dimension, self.num_generated - 1)
self.num_generated += n
return self
def _scramble(self):
g: Optional[torch.Generator] = None
if self.seed is not None:
g = torch.Generator()
g.manual_seed(self.seed)
cpu = torch.device("cpu")
# Generate shift vector
shift_ints = torch.randint(2, (self.dimension, self.MAXBIT), device=cpu, generator=g)
self.shift = torch.mv(shift_ints, torch.pow(2, torch.arange(0, self.MAXBIT, device=cpu)))
# Generate lower triangular matrices (stacked across dimensions)
ltm_dims = (self.dimension, self.MAXBIT, self.MAXBIT)
ltm = torch.randint(2, ltm_dims, device=cpu, generator=g).tril()
torch._sobol_engine_scramble_(self.sobolstate, ltm, self.dimension)
def __repr__(self):
fmt_string = [f'dimension={self.dimension}']
if self.scramble:
fmt_string += ['scramble=True']
if self.seed is not None:
fmt_string += [f'seed={self.seed}']
return self.__class__.__name__ + '(' + ', '.join(fmt_string) + ')'
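# Hedged usage sketch (illustrative; this helper is not part of the module).
# It draws points, uses draw_base2 under its power-of-two constraint, and then
# resets and fast-forwards the engine.
def _sobol_engine_example():
    eng = SobolEngine(dimension=3, scramble=True, seed=1234)
    first = eng.draw(4)        # a (4, 3) tensor of quasi-random points in [0, 1)
    more = eng.draw_base2(2)   # 4 more points; 4 + 2**2 = 8 is a power of two, so this is allowed
    eng.reset()                # back to the base state
    eng.fast_forward(8)        # skip the first 8 points without materializing them
    return first, more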
|
pytorch-master
|
torch/quasirandom.py
|
from typing import Any, Iterable
from .version import __version__ as internal_version
__all__ = ['TorchVersion', 'Version', 'InvalidVersion']
class _LazyImport:
"""Wraps around classes lazy imported from packaging.version
The output of the function v in the following snippets is identical:
from packaging.version import Version
def v():
return Version('1.2.3')
and
Version = _LazyImport('Version')
def v():
return Version('1.2.3')
The difference here is that in the latter example the import
does not happen until v is called
"""
def __init__(self, cls_name: str) -> None:
self._cls_name = cls_name
def get_cls(self):
try:
import packaging.version # type: ignore[import]
except ImportError:
# If packaging isn't installed, try and use the vendored copy
# in pkg_resources
from pkg_resources import packaging # type: ignore[attr-defined, no-redef]
return getattr(packaging.version, self._cls_name)
def __call__(self, *args, **kwargs):
return self.get_cls()(*args, **kwargs)
def __instancecheck__(self, obj):
return isinstance(obj, self.get_cls())
Version = _LazyImport("Version")
InvalidVersion = _LazyImport("InvalidVersion")
class TorchVersion(str):
"""A string with magic powers to compare to both Version and iterables!
Prior to 1.10.0 torch.__version__ was stored as a str, so many users did
comparisons against torch.__version__ as if it were a str. In order to not
break them we have TorchVersion, which masquerades as a str while also
having the ability to compare against both packaging.version.Version as
well as tuples of values, e.g. (1, 2, 1)
Examples:
Comparing a TorchVersion object to a Version object
TorchVersion('1.10.0a') > Version('1.10.0a')
Comparing a TorchVersion object to a Tuple object
TorchVersion('1.10.0a') > (1, 2) # 1.2
TorchVersion('1.10.0a') > (1, 2, 1) # 1.2.1
Comparing a TorchVersion object against a string
TorchVersion('1.10.0a') > '1.2'
TorchVersion('1.10.0a') > '1.2.1'
"""
# fully qualified type names here to appease mypy
def _convert_to_version(self, inp: Any) -> Any:
if isinstance(inp, Version.get_cls()):
return inp
elif isinstance(inp, str):
return Version(inp)
elif isinstance(inp, Iterable):
# Ideally this should work for most cases by attempting to group
# the version tuple, assuming the tuple looks like (MAJOR, MINOR, ?PATCH)
# Examples:
# * (1) -> Version("1")
# * (1, 20) -> Version("1.20")
# * (1, 20, 1) -> Version("1.20.1")
return Version('.'.join((str(item) for item in inp)))
else:
raise InvalidVersion(inp)
def _cmp_wrapper(self, cmp: Any, method: str) -> bool:
try:
return getattr(Version(self), method)(self._convert_to_version(cmp))
except BaseException as e:
if not isinstance(e, InvalidVersion.get_cls()):
raise
# Fall back to regular string comparison if dealing with an invalid
# version like 'parrot'
return getattr(super(), method)(cmp)
for cmp_method in ["__gt__", "__lt__", "__eq__", "__ge__", "__le__"]:
setattr(TorchVersion, cmp_method, lambda x, y, method=cmp_method: x._cmp_wrapper(y, method))
__version__ = TorchVersion(internal_version)
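# Hedged usage sketch (illustrative; this helper is not part of the module).
# TorchVersion compares cleanly against strings, packaging Versions, and tuples
# while still behaving like a plain str for legacy callers.
def _torch_version_example():
    v = TorchVersion("1.10.0a0")
    assert v > "1.9"            # compared via packaging.version
    assert v > (1, 9, 1)        # the tuple is converted to Version("1.9.1")
    assert isinstance(v, str)   # still a str subclass
    return v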
|
pytorch-master
|
torch/torch_version.py
|
"""
This makes the functions in torch._C._VariableFunctions available as
torch._VF.<funcname>
without mypy being able to find them.
A subset of those functions are mapped to ATen functions in
torch/jit/_builtins.py
See https://github.com/pytorch/pytorch/issues/21478 for the reason for
introducing torch._VF
"""
import sys
import types
import torch
class VFModule(types.ModuleType):
vf: types.ModuleType
def __init__(self, name):
super(VFModule, self).__init__(name)
self.vf = torch._C._VariableFunctions
def __getattr__(self, attr):
return getattr(self.vf, attr)
sys.modules[__name__] = VFModule(__name__)
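# Hedged sketch of the delegation (illustrative; not part of the module). After
# the sys.modules swap above, attribute lookups on torch._VF fall through to
# torch._C._VariableFunctions via VFModule.__getattr__.
def _vf_lookup_example():
    import torch
    # Resolved through VFModule.__getattr__; the returned callable is the one
    # exposed on torch._C._VariableFunctions.
    return torch._VF.norm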
|
pytorch-master
|
torch/_VF.py
|
"""Adds docstrings to Tensor functions"""
import torch._C
from torch._C import _add_docstr as add_docstr
from ._torch_docs import parse_kwargs, reproducibility_notes
def add_docstr_all(method, docstr):
add_docstr(getattr(torch._C._TensorBase, method), docstr)
common_args = parse_kwargs(
"""
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
"""
)
new_common_args = parse_kwargs(
"""
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
Default: if None, same :class:`torch.dtype` as this tensor.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if None, same :class:`torch.device` as this tensor.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
"""
)
add_docstr_all(
"new_tensor",
r"""
new_tensor(data, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a new Tensor with :attr:`data` as the tensor data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
.. warning::
:func:`new_tensor` always copies :attr:`data`. If you have a Tensor
``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
or :func:`torch.Tensor.detach`.
If you have a numpy array and want to avoid a copy, use
:func:`torch.from_numpy`.
.. warning::
When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
The equivalents using ``clone()`` and ``detach()`` are recommended.
Args:
data (array_like): The returned Tensor copies :attr:`data`.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones((2,), dtype=torch.int8)
>>> data = [[0, 1], [2, 3]]
>>> tensor.new_tensor(data)
tensor([[ 0, 1],
[ 2, 3]], dtype=torch.int8)
""".format(
**new_common_args
),
)
add_docstr_all(
"new_full",
r"""
new_full(size, fill_value, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
fill_value (scalar): the number to fill the output tensor with.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones((2,), dtype=torch.float64)
>>> tensor.new_full((3, 4), 3.141592)
tensor([[ 3.1416, 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416, 3.1416]], dtype=torch.float64)
""".format(
**new_common_args
),
)
add_docstr_all(
"new_empty",
r"""
new_empty(size, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with uninitialized data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones(())
>>> tensor.new_empty((2, 3))
tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
[ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
""".format(
**new_common_args
),
)
add_docstr_all(
"new_empty_strided",
r"""
new_empty_strided(size, stride, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
uninitialized data. By default, the returned Tensor has the same
:class:`torch.dtype` and :class:`torch.device` as this tensor.
Args:
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.ones(())
>>> tensor.new_empty_strided((2, 3), (3, 1))
tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30],
[ 3.0949e-41, 4.4842e-44, 0.0000e+00]])
""".format(
**new_common_args
),
)
add_docstr_all(
"new_ones",
r"""
new_ones(size, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with ``1``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.tensor((), dtype=torch.int32)
>>> tensor.new_ones((2, 3))
tensor([[ 1, 1, 1],
[ 1, 1, 1]], dtype=torch.int32)
""".format(
**new_common_args
),
)
add_docstr_all(
"new_zeros",
r"""
new_zeros(size, dtype=None, device=None, requires_grad=False) -> Tensor
Returns a Tensor of size :attr:`size` filled with ``0``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
{dtype}
{device}
{requires_grad}
Example::
>>> tensor = torch.tensor((), dtype=torch.float64)
>>> tensor.new_zeros((2, 3))
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=torch.float64)
""".format(
**new_common_args
),
)
add_docstr_all(
"abs",
r"""
abs() -> Tensor
See :func:`torch.abs`
""",
)
add_docstr_all(
"abs_",
r"""
abs_() -> Tensor
In-place version of :meth:`~Tensor.abs`
""",
)
add_docstr_all(
"absolute",
r"""
absolute() -> Tensor
Alias for :func:`abs`
""",
)
add_docstr_all(
"absolute_",
r"""
absolute_() -> Tensor
In-place version of :meth:`~Tensor.absolute`
Alias for :func:`abs_`
""",
)
add_docstr_all(
"acos",
r"""
acos() -> Tensor
See :func:`torch.acos`
""",
)
add_docstr_all(
"acos_",
r"""
acos_() -> Tensor
In-place version of :meth:`~Tensor.acos`
""",
)
add_docstr_all(
"arccos",
r"""
arccos() -> Tensor
See :func:`torch.arccos`
""",
)
add_docstr_all(
"arccos_",
r"""
arccos_() -> Tensor
In-place version of :meth:`~Tensor.arccos`
""",
)
add_docstr_all(
"acosh",
r"""
acosh() -> Tensor
See :func:`torch.acosh`
""",
)
add_docstr_all(
"acosh_",
r"""
acosh_() -> Tensor
In-place version of :meth:`~Tensor.acosh`
""",
)
add_docstr_all(
"arccosh",
r"""
arccosh() -> Tensor
See :func:`torch.arccosh`
""",
)
add_docstr_all(
"arccosh_",
r"""
arccosh_() -> Tensor
In-place version of :meth:`~Tensor.arccosh`
""",
)
add_docstr_all(
"add",
r"""
add(other, *, alpha=1) -> Tensor
Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
and :attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`alpha` before being used.
When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor
See :func:`torch.add`
""",
)
add_docstr_all(
"add_",
r"""
add_(other, *, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.add`
""",
)
add_docstr_all(
"addbmm",
r"""
addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
See :func:`torch.addbmm`
""",
)
add_docstr_all(
"addbmm_",
r"""
addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.addbmm`
""",
)
add_docstr_all(
"addcdiv",
r"""
addcdiv(tensor1, tensor2, *, value=1) -> Tensor
See :func:`torch.addcdiv`
""",
)
add_docstr_all(
"addcdiv_",
r"""
addcdiv_(tensor1, tensor2, *, value=1) -> Tensor
In-place version of :meth:`~Tensor.addcdiv`
""",
)
add_docstr_all(
"addcmul",
r"""
addcmul(tensor1, tensor2, *, value=1) -> Tensor
See :func:`torch.addcmul`
""",
)
add_docstr_all(
"addcmul_",
r"""
addcmul_(tensor1, tensor2, *, value=1) -> Tensor
In-place version of :meth:`~Tensor.addcmul`
""",
)
add_docstr_all(
"addmm",
r"""
addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
See :func:`torch.addmm`
""",
)
add_docstr_all(
"addmm_",
r"""
addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.addmm`
""",
)
add_docstr_all(
"addmv",
r"""
addmv(mat, vec, *, beta=1, alpha=1) -> Tensor
See :func:`torch.addmv`
""",
)
add_docstr_all(
"addmv_",
r"""
addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.addmv`
""",
)
add_docstr_all(
"sspaddmm",
r"""
sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor
See :func:`torch.sspaddmm`
""",
)
add_docstr_all(
"smm",
r"""
smm(mat) -> Tensor
See :func:`torch.smm`
""",
)
add_docstr_all(
"addr",
r"""
addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor
See :func:`torch.addr`
""",
)
add_docstr_all(
"addr_",
r"""
addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.addr`
""",
)
add_docstr_all(
"align_as",
r"""
align_as(other) -> Tensor
Permutes the dimensions of the :attr:`self` tensor to match the dimension order
in the :attr:`other` tensor, adding size-one dims for any new names.
This operation is useful for explicit broadcasting by names (see examples).
All of the dims of :attr:`self` must be named in order to use this method.
The resulting tensor is a view on the original tensor.
All dimension names of :attr:`self` must be present in ``other.names``.
:attr:`other` may contain named dimensions that are not in ``self.names``;
the output tensor has a size-one dimension for each of those new names.
To align a tensor to a specific order, use :meth:`~Tensor.align_to`.
Examples::
# Example 1: Applying a mask
>>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
>>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
>>> imgs.masked_fill_(mask.align_as(imgs), 0)
# Example 2: Applying a per-channel-scale
>>> def scale_channels(input, scale):
>>> scale = scale.refine_names('C')
>>> return input * scale.align_as(input)
>>> num_channels = 3
>>> scale = torch.randn(num_channels, names=('C',))
>>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
>>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
>>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))
# scale_channels is agnostic to the dimension order of the input
>>> scale_channels(imgs, scale)
>>> scale_channels(more_imgs, scale)
>>> scale_channels(videos, scale)
.. warning::
The named tensor API is experimental and subject to change.
""",
)
add_docstr_all(
"all",
r"""
all(dim=None, keepdim=False) -> Tensor
See :func:`torch.all`
""",
)
add_docstr_all(
"allclose",
r"""
allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
See :func:`torch.allclose`
""",
)
add_docstr_all(
"angle",
r"""
angle() -> Tensor
See :func:`torch.angle`
""",
)
add_docstr_all(
"any",
r"""
any(dim=None, keepdim=False) -> Tensor
See :func:`torch.any`
""",
)
add_docstr_all(
"apply_",
r"""
apply_(callable) -> Tensor
Applies the function :attr:`callable` to each element in the tensor, replacing
each element with the value returned by :attr:`callable`.
.. note::
This function only works with CPU tensors and should not be used in code
sections that require high performance.
""",
)
add_docstr_all(
"asin",
r"""
asin() -> Tensor
See :func:`torch.asin`
""",
)
add_docstr_all(
"asin_",
r"""
asin_() -> Tensor
In-place version of :meth:`~Tensor.asin`
""",
)
add_docstr_all(
"arcsin",
r"""
arcsin() -> Tensor
See :func:`torch.arcsin`
""",
)
add_docstr_all(
"arcsin_",
r"""
arcsin_() -> Tensor
In-place version of :meth:`~Tensor.arcsin`
""",
)
add_docstr_all(
"asinh",
r"""
asinh() -> Tensor
See :func:`torch.asinh`
""",
)
add_docstr_all(
"asinh_",
r"""
asinh_() -> Tensor
In-place version of :meth:`~Tensor.asinh`
""",
)
add_docstr_all(
"arcsinh",
r"""
arcsinh() -> Tensor
See :func:`torch.arcsinh`
""",
)
add_docstr_all(
"arcsinh_",
r"""
arcsinh_() -> Tensor
In-place version of :meth:`~Tensor.arcsinh`
""",
)
add_docstr_all(
"as_strided",
r"""
as_strided(size, stride, storage_offset=None) -> Tensor
See :func:`torch.as_strided`
""",
)
add_docstr_all(
"atan",
r"""
atan() -> Tensor
See :func:`torch.atan`
""",
)
add_docstr_all(
"atan_",
r"""
atan_() -> Tensor
In-place version of :meth:`~Tensor.atan`
""",
)
add_docstr_all(
"arctan",
r"""
arctan() -> Tensor
See :func:`torch.arctan`
""",
)
add_docstr_all(
"arctan_",
r"""
arctan_() -> Tensor
In-place version of :meth:`~Tensor.arctan`
""",
)
add_docstr_all(
"atan2",
r"""
atan2(other) -> Tensor
See :func:`torch.atan2`
""",
)
add_docstr_all(
"atan2_",
r"""
atan2_(other) -> Tensor
In-place version of :meth:`~Tensor.atan2`
""",
)
add_docstr_all(
"arctan2",
r"""
arctan2(other) -> Tensor
See :func:`torch.arctan2`
""",
)
add_docstr_all(
"arctan2_",
r"""
arctan2_(other) -> Tensor
In-place version of :meth:`~Tensor.arctan2`
""",
)
add_docstr_all(
"atanh",
r"""
atanh() -> Tensor
See :func:`torch.atanh`
""",
)
add_docstr_all(
"atanh_",
r"""
atanh_() -> Tensor
In-place version of :meth:`~Tensor.atanh`
""",
)
add_docstr_all(
"arctanh",
r"""
arctanh() -> Tensor
See :func:`torch.arctanh`
""",
)
add_docstr_all(
"arctanh_",
r"""
arctanh_() -> Tensor
In-place version of :meth:`~Tensor.arctanh`
""",
)
add_docstr_all(
"baddbmm",
r"""
baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor
See :func:`torch.baddbmm`
""",
)
add_docstr_all(
"baddbmm_",
r"""
baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.baddbmm`
""",
)
add_docstr_all(
"bernoulli",
r"""
bernoulli(*, generator=None) -> Tensor
Returns a result tensor where each :math:`\texttt{result[i]}` is independently
sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
floating point ``dtype``, and the result will have the same ``dtype``.
See :func:`torch.bernoulli`
""",
)
add_docstr_all(
"bernoulli_",
r"""
bernoulli_(p=0.5, *, generator=None) -> Tensor
Fills each location of :attr:`self` with an independent sample from
:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
``dtype``.
:attr:`p` should either be a scalar or tensor containing probabilities to be
used for drawing the binary random number.
If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
will be set to a value sampled from
:math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
floating point ``dtype``.
See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
""",
)
add_docstr_all(
"bincount",
r"""
bincount(weights=None, minlength=0) -> Tensor
See :func:`torch.bincount`
""",
)
add_docstr_all(
"bitwise_not",
r"""
bitwise_not() -> Tensor
See :func:`torch.bitwise_not`
""",
)
add_docstr_all(
"bitwise_not_",
r"""
bitwise_not_() -> Tensor
In-place version of :meth:`~Tensor.bitwise_not`
""",
)
add_docstr_all(
"bitwise_and",
r"""
bitwise_and() -> Tensor
See :func:`torch.bitwise_and`
""",
)
add_docstr_all(
"bitwise_and_",
r"""
bitwise_and_() -> Tensor
In-place version of :meth:`~Tensor.bitwise_and`
""",
)
add_docstr_all(
"bitwise_or",
r"""
bitwise_or() -> Tensor
See :func:`torch.bitwise_or`
""",
)
add_docstr_all(
"bitwise_or_",
r"""
bitwise_or_() -> Tensor
In-place version of :meth:`~Tensor.bitwise_or`
""",
)
add_docstr_all(
"bitwise_xor",
r"""
bitwise_xor() -> Tensor
See :func:`torch.bitwise_xor`
""",
)
add_docstr_all(
"bitwise_xor_",
r"""
bitwise_xor_() -> Tensor
In-place version of :meth:`~Tensor.bitwise_xor`
""",
)
add_docstr_all(
"bitwise_left_shift",
r"""
bitwise_left_shift(other) -> Tensor
See :func:`torch.bitwise_left_shift`
""",
)
add_docstr_all(
"bitwise_left_shift_",
r"""
bitwise_left_shift_(other) -> Tensor
In-place version of :meth:`~Tensor.bitwise_left_shift`
""",
)
add_docstr_all(
"bitwise_right_shift",
r"""
bitwise_right_shift(other) -> Tensor
See :func:`torch.bitwise_right_shift`
""",
)
add_docstr_all(
"bitwise_right_shift_",
r"""
bitwise_right_shift_(other) -> Tensor
In-place version of :meth:`~Tensor.bitwise_right_shift`
""",
)
add_docstr_all(
"broadcast_to",
r"""
broadcast_to(shape) -> Tensor
See :func:`torch.broadcast_to`.
""",
)
add_docstr_all(
"logical_and",
r"""
logical_and() -> Tensor
See :func:`torch.logical_and`
""",
)
add_docstr_all(
"logical_and_",
r"""
logical_and_() -> Tensor
In-place version of :meth:`~Tensor.logical_and`
""",
)
add_docstr_all(
"logical_not",
r"""
logical_not() -> Tensor
See :func:`torch.logical_not`
""",
)
add_docstr_all(
"logical_not_",
r"""
logical_not_() -> Tensor
In-place version of :meth:`~Tensor.logical_not`
""",
)
add_docstr_all(
"logical_or",
r"""
logical_or() -> Tensor
See :func:`torch.logical_or`
""",
)
add_docstr_all(
"logical_or_",
r"""
logical_or_() -> Tensor
In-place version of :meth:`~Tensor.logical_or`
""",
)
add_docstr_all(
"logical_xor",
r"""
logical_xor() -> Tensor
See :func:`torch.logical_xor`
""",
)
add_docstr_all(
"logical_xor_",
r"""
logical_xor_() -> Tensor
In-place version of :meth:`~Tensor.logical_xor`
""",
)
add_docstr_all(
"bmm",
r"""
bmm(batch2) -> Tensor
See :func:`torch.bmm`
""",
)
add_docstr_all(
"cauchy_",
r"""
cauchy_(median=0, sigma=1, *, generator=None) -> Tensor
Fills the tensor with numbers drawn from the Cauchy distribution:
.. math::
f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}
""",
)
add_docstr_all(
"ceil",
r"""
ceil() -> Tensor
See :func:`torch.ceil`
""",
)
add_docstr_all(
"ceil_",
r"""
ceil_() -> Tensor
In-place version of :meth:`~Tensor.ceil`
""",
)
add_docstr_all(
"cholesky",
r"""
cholesky(upper=False) -> Tensor
See :func:`torch.cholesky`
""",
)
add_docstr_all(
"cholesky_solve",
r"""
cholesky_solve(input2, upper=False) -> Tensor
See :func:`torch.cholesky_solve`
""",
)
add_docstr_all(
"cholesky_inverse",
r"""
cholesky_inverse(upper=False) -> Tensor
See :func:`torch.cholesky_inverse`
""",
)
add_docstr_all(
"clamp",
r"""
clamp(min=None, max=None) -> Tensor
See :func:`torch.clamp`
""",
)
add_docstr_all(
"clamp_",
r"""
clamp_(min=None, max=None) -> Tensor
In-place version of :meth:`~Tensor.clamp`
""",
)
add_docstr_all(
"clip",
r"""
clip(min=None, max=None) -> Tensor
Alias for :meth:`~Tensor.clamp`.
""",
)
add_docstr_all(
"clip_",
r"""
clip_(min=None, max=None) -> Tensor
Alias for :meth:`~Tensor.clamp_`.
""",
)
add_docstr_all(
"clone",
r"""
clone(*, memory_format=torch.preserve_format) -> Tensor
See :func:`torch.clone`
""".format(
**common_args
),
)
add_docstr_all(
"coalesce",
r"""
coalesce() -> Tensor
Returns a coalesced copy of :attr:`self` if :attr:`self` is an
:ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.
Returns :attr:`self` if :attr:`self` is a coalesced tensor.
.. warning::
Throws an error if :attr:`self` is not a sparse COO tensor.
""",
)
add_docstr_all(
"contiguous",
r"""
contiguous(memory_format=torch.contiguous_format) -> Tensor
Returns a contiguous in memory tensor containing the same data as :attr:`self` tensor. If
:attr:`self` tensor is already in the specified memory format, this function returns the
:attr:`self` tensor.
Args:
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
""",
)
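# Illustrative sketch only (helper name is hypothetical, nothing runs at import):
# a transposed view is typically non-contiguous, and ``contiguous()`` copies it
# into contiguous memory.
def _example_contiguous():
    import torch
    x = torch.arange(6.0).reshape(2, 3).t()   # transpose -> non-contiguous view
    y = x.contiguous()                        # materializes a contiguous copy
    return x.is_contiguous(), y.is_contiguous()   # (False, True)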
add_docstr_all(
"copy_",
r"""
copy_(src, non_blocking=False) -> Tensor
Copies the elements from :attr:`src` into :attr:`self` tensor and returns
:attr:`self`.
The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
with the :attr:`self` tensor. It may be of a different data type or reside on a
different device.
Args:
src (Tensor): the source tensor to copy from
non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
the copy may occur asynchronously with respect to the host. For other
cases, this argument has no effect.
""",
)
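# Sketch of the broadcasting behavior described above (assumed helper, not part
# of the registered docs): the 1-D source is broadcast across the rows of dst.
def _example_copy_():
    import torch
    dst = torch.zeros(2, 3)
    src = torch.arange(3.0)   # shape (3,), broadcastable with dst
    dst.copy_(src)            # dst is now [[0., 1., 2.], [0., 1., 2.]]
    return dst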
add_docstr_all(
"conj",
r"""
conj() -> Tensor
See :func:`torch.conj`
""",
)
add_docstr_all(
"conj_physical",
r"""
conj_physical() -> Tensor
See :func:`torch.conj_physical`
""",
)
add_docstr_all(
"conj_physical_",
r"""
conj_physical_() -> Tensor
In-place version of :meth:`~Tensor.conj_physical`
""",
)
add_docstr_all(
"resolve_conj",
r"""
resolve_conj() -> Tensor
See :func:`torch.resolve_conj`
""",
)
add_docstr_all(
"resolve_neg",
r"""
resolve_neg() -> Tensor
See :func:`torch.resolve_neg`
""",
)
add_docstr_all(
"copysign",
r"""
copysign(other) -> Tensor
See :func:`torch.copysign`
""",
)
add_docstr_all(
"copysign_",
r"""
copysign_(other) -> Tensor
In-place version of :meth:`~Tensor.copysign`
""",
)
add_docstr_all(
"cos",
r"""
cos() -> Tensor
See :func:`torch.cos`
""",
)
add_docstr_all(
"cos_",
r"""
cos_() -> Tensor
In-place version of :meth:`~Tensor.cos`
""",
)
add_docstr_all(
"cosh",
r"""
cosh() -> Tensor
See :func:`torch.cosh`
""",
)
add_docstr_all(
"cosh_",
r"""
cosh_() -> Tensor
In-place version of :meth:`~Tensor.cosh`
""",
)
add_docstr_all(
"cpu",
r"""
cpu(memory_format=torch.preserve_format) -> Tensor
Returns a copy of this object in CPU memory.
If this object is already in CPU memory and on the correct device,
then no copy is performed and the original object is returned.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"count_nonzero",
r"""
count_nonzero(dim=None) -> Tensor
See :func:`torch.count_nonzero`
""",
)
add_docstr_all(
"cov",
r"""
cov(*, correction=1, fweights=None, aweights=None) -> Tensor
See :func:`torch.cov`
""",
)
add_docstr_all(
"corrcoef",
r"""
corrcoef() -> Tensor
See :func:`torch.corrcoef`
""",
)
add_docstr_all(
"cross",
r"""
cross(other, dim=None) -> Tensor
See :func:`torch.cross`
""",
)
add_docstr_all(
"cuda",
r"""
cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
Returns a copy of this object in CUDA memory.
If this object is already in CUDA memory and on the correct device,
then no copy is performed and the original object is returned.
Args:
device (:class:`torch.device`): The destination GPU device.
Defaults to the current CUDA device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host.
Otherwise, the argument has no effect. Default: ``False``.
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"ipu",
r"""
ipu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
Returns a copy of this object in IPU memory.
If this object is already in IPU memory and on the correct device,
then no copy is performed and the original object is returned.
Args:
device (:class:`torch.device`): The destination IPU device.
Defaults to the current IPU device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host.
Otherwise, the argument has no effect. Default: ``False``.
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"xpu",
r"""
xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor
Returns a copy of this object in XPU memory.
If this object is already in XPU memory and on the correct device,
then no copy is performed and the original object is returned.
Args:
device (:class:`torch.device`): The destination XPU device.
Defaults to the current XPU device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host.
Otherwise, the argument has no effect. Default: ``False``.
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"logcumsumexp",
r"""
logcumsumexp(dim) -> Tensor
See :func:`torch.logcumsumexp`
""",
)
add_docstr_all(
"cummax",
r"""
cummax(dim) -> (Tensor, Tensor)
See :func:`torch.cummax`
""",
)
add_docstr_all(
"cummin",
r"""
cummin(dim) -> (Tensor, Tensor)
See :func:`torch.cummin`
""",
)
add_docstr_all(
"cumprod",
r"""
cumprod(dim, dtype=None) -> Tensor
See :func:`torch.cumprod`
""",
)
add_docstr_all(
"cumprod_",
r"""
cumprod_(dim, dtype=None) -> Tensor
In-place version of :meth:`~Tensor.cumprod`
""",
)
add_docstr_all(
"cumsum",
r"""
cumsum(dim, dtype=None) -> Tensor
See :func:`torch.cumsum`
""",
)
add_docstr_all(
"cumsum_",
r"""
cumsum_(dim, dtype=None) -> Tensor
In-place version of :meth:`~Tensor.cumsum`
""",
)
add_docstr_all(
"data_ptr",
r"""
data_ptr() -> int
Returns the address of the first element of :attr:`self` tensor.
""",
)
add_docstr_all(
"dequantize",
r"""
dequantize() -> Tensor
Given a quantized Tensor, dequantize it and return the dequantized float Tensor.
""",
)
add_docstr_all(
"dense_dim",
r"""
dense_dim() -> int
Return the number of dense dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
.. warning::
Throws an error if :attr:`self` is not a sparse tensor.
See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
""",
)
add_docstr_all(
"diag",
r"""
diag(diagonal=0) -> Tensor
See :func:`torch.diag`
""",
)
add_docstr_all(
"diag_embed",
r"""
diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor
See :func:`torch.diag_embed`
""",
)
add_docstr_all(
"diagflat",
r"""
diagflat(offset=0) -> Tensor
See :func:`torch.diagflat`
""",
)
add_docstr_all(
"diagonal",
r"""
diagonal(offset=0, dim1=0, dim2=1) -> Tensor
See :func:`torch.diagonal`
""",
)
add_docstr_all(
"diagonal_scatter",
r"""
diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor
See :func:`torch.diagonal_scatter`
""",
)
add_docstr_all(
"as_strided_scatter",
r"""
as_strided_scatter(src, size, stride, storage_offset=0) -> Tensor
See :func:`torch.as_strided_scatter`
""",
)
add_docstr_all(
"fill_diagonal_",
r"""
fill_diagonal_(fill_value, wrap=False) -> Tensor
Fills the main diagonal of a tensor that has at least 2 dimensions.
When dims > 2, all dimensions of the input must be of equal length.
This function modifies the input tensor in-place, and returns the input tensor.
Arguments:
fill_value (Scalar): the fill value
wrap (bool): whether the diagonal is 'wrapped' after N columns for tall matrices.
Example::
>>> a = torch.zeros(3, 3)
>>> a.fill_diagonal_(5)
tensor([[5., 0., 0.],
[0., 5., 0.],
[0., 0., 5.]])
>>> b = torch.zeros(7, 3)
>>> b.fill_diagonal_(5)
tensor([[5., 0., 0.],
[0., 5., 0.],
[0., 0., 5.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]])
>>> c = torch.zeros(7, 3)
>>> c.fill_diagonal_(5, wrap=True)
tensor([[5., 0., 0.],
[0., 5., 0.],
[0., 0., 5.],
[0., 0., 0.],
[5., 0., 0.],
[0., 5., 0.],
[0., 0., 5.]])
""",
)
add_docstr_all(
"floor_divide",
r"""
floor_divide(value) -> Tensor
See :func:`torch.floor_divide`
""",
)
add_docstr_all(
"floor_divide_",
r"""
floor_divide_(value) -> Tensor
In-place version of :meth:`~Tensor.floor_divide`
""",
)
add_docstr_all(
"diff",
r"""
diff(n=1, dim=-1, prepend=None, append=None) -> Tensor
See :func:`torch.diff`
""",
)
add_docstr_all(
"digamma",
r"""
digamma() -> Tensor
See :func:`torch.digamma`
""",
)
add_docstr_all(
"digamma_",
r"""
digamma_() -> Tensor
In-place version of :meth:`~Tensor.digamma`
""",
)
add_docstr_all(
"dim",
r"""
dim() -> int
Returns the number of dimensions of :attr:`self` tensor.
""",
)
add_docstr_all(
"dist",
r"""
dist(other, p=2) -> Tensor
See :func:`torch.dist`
""",
)
add_docstr_all(
"div",
r"""
div(value, *, rounding_mode=None) -> Tensor
See :func:`torch.div`
""",
)
add_docstr_all(
"div_",
r"""
div_(value, *, rounding_mode=None) -> Tensor
In-place version of :meth:`~Tensor.div`
""",
)
add_docstr_all(
"divide",
r"""
divide(value, *, rounding_mode=None) -> Tensor
See :func:`torch.divide`
""",
)
add_docstr_all(
"divide_",
r"""
divide_(value, *, rounding_mode=None) -> Tensor
In-place version of :meth:`~Tensor.divide`
""",
)
add_docstr_all(
"dot",
r"""
dot(other) -> Tensor
See :func:`torch.dot`
""",
)
add_docstr_all(
"eig",
r"""
eig(eigenvectors=False) -> (Tensor, Tensor)
See :func:`torch.eig`
""",
)
add_docstr_all(
"element_size",
r"""
element_size() -> int
Returns the size in bytes of an individual element.
Example::
>>> torch.tensor([]).element_size()
4
>>> torch.tensor([], dtype=torch.uint8).element_size()
1
""",
)
add_docstr_all(
"eq",
r"""
eq(other) -> Tensor
See :func:`torch.eq`
""",
)
add_docstr_all(
"eq_",
r"""
eq_(other) -> Tensor
In-place version of :meth:`~Tensor.eq`
""",
)
add_docstr_all(
"equal",
r"""
equal(other) -> bool
See :func:`torch.equal`
""",
)
add_docstr_all(
"erf",
r"""
erf() -> Tensor
See :func:`torch.erf`
""",
)
add_docstr_all(
"erf_",
r"""
erf_() -> Tensor
In-place version of :meth:`~Tensor.erf`
""",
)
add_docstr_all(
"erfc",
r"""
erfc() -> Tensor
See :func:`torch.erfc`
""",
)
add_docstr_all(
"erfc_",
r"""
erfc_() -> Tensor
In-place version of :meth:`~Tensor.erfc`
""",
)
add_docstr_all(
"erfinv",
r"""
erfinv() -> Tensor
See :func:`torch.erfinv`
""",
)
add_docstr_all(
"erfinv_",
r"""
erfinv_() -> Tensor
In-place version of :meth:`~Tensor.erfinv`
""",
)
add_docstr_all(
"exp",
r"""
exp() -> Tensor
See :func:`torch.exp`
""",
)
add_docstr_all(
"exp_",
r"""
exp_() -> Tensor
In-place version of :meth:`~Tensor.exp`
""",
)
add_docstr_all(
"exp2",
r"""
exp2() -> Tensor
See :func:`torch.exp2`
""",
)
add_docstr_all(
"exp2_",
r"""
exp2_() -> Tensor
In-place version of :meth:`~Tensor.exp2`
""",
)
add_docstr_all(
"expm1",
r"""
expm1() -> Tensor
See :func:`torch.expm1`
""",
)
add_docstr_all(
"expm1_",
r"""
expm1_() -> Tensor
In-place version of :meth:`~Tensor.expm1`
""",
)
add_docstr_all(
"exponential_",
r"""
exponential_(lambd=1, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements drawn from the exponential distribution:
.. math::
f(x) = \lambda e^{-\lambda x}
""",
)
add_docstr_all(
"fill_",
r"""
fill_(value) -> Tensor
Fills :attr:`self` tensor with the specified value.
""",
)
add_docstr_all(
"floor",
r"""
floor() -> Tensor
See :func:`torch.floor`
""",
)
add_docstr_all(
"flip",
r"""
flip(dims) -> Tensor
See :func:`torch.flip`
""",
)
add_docstr_all(
"fliplr",
r"""
fliplr() -> Tensor
See :func:`torch.fliplr`
""",
)
add_docstr_all(
"flipud",
r"""
flipud() -> Tensor
See :func:`torch.flipud`
""",
)
add_docstr_all(
"roll",
r"""
roll(shifts, dims) -> Tensor
See :func:`torch.roll`
""",
)
add_docstr_all(
"floor_",
r"""
floor_() -> Tensor
In-place version of :meth:`~Tensor.floor`
""",
)
add_docstr_all(
"fmod",
r"""
fmod(divisor) -> Tensor
See :func:`torch.fmod`
""",
)
add_docstr_all(
"fmod_",
r"""
fmod_(divisor) -> Tensor
In-place version of :meth:`~Tensor.fmod`
""",
)
add_docstr_all(
"frac",
r"""
frac() -> Tensor
See :func:`torch.frac`
""",
)
add_docstr_all(
"frac_",
r"""
frac_() -> Tensor
In-place version of :meth:`~Tensor.frac`
""",
)
add_docstr_all(
"frexp",
r"""
frexp() -> (Tensor mantissa, Tensor exponent)
See :func:`torch.frexp`
""",
)
add_docstr_all(
"flatten",
r"""
flatten(start_dim=0, end_dim=-1) -> Tensor
See :func:`torch.flatten`
""",
)
add_docstr_all(
"gather",
r"""
gather(dim, index) -> Tensor
See :func:`torch.gather`
""",
)
add_docstr_all(
"gcd",
r"""
gcd(other) -> Tensor
See :func:`torch.gcd`
""",
)
add_docstr_all(
"gcd_",
r"""
gcd_(other) -> Tensor
In-place version of :meth:`~Tensor.gcd`
""",
)
add_docstr_all(
"ge",
r"""
ge(other) -> Tensor
See :func:`torch.ge`.
""",
)
add_docstr_all(
"ge_",
r"""
ge_(other) -> Tensor
In-place version of :meth:`~Tensor.ge`.
""",
)
add_docstr_all(
"greater_equal",
r"""
greater_equal(other) -> Tensor
See :func:`torch.greater_equal`.
""",
)
add_docstr_all(
"greater_equal_",
r"""
greater_equal_(other) -> Tensor
In-place version of :meth:`~Tensor.greater_equal`.
""",
)
add_docstr_all(
"geometric_",
r"""
geometric_(p, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements drawn from the geometric distribution:
.. math::
f(X=k) = (1 - p)^{k - 1} p
""",
)
add_docstr_all(
"geqrf",
r"""
geqrf() -> (Tensor, Tensor)
See :func:`torch.geqrf`
""",
)
add_docstr_all(
"ger",
r"""
ger(vec2) -> Tensor
See :func:`torch.ger`
""",
)
add_docstr_all(
"inner",
r"""
inner(other) -> Tensor
See :func:`torch.inner`.
""",
)
add_docstr_all(
"outer",
r"""
outer(vec2) -> Tensor
See :func:`torch.outer`.
""",
)
add_docstr_all(
"hypot",
r"""
hypot(other) -> Tensor
See :func:`torch.hypot`
""",
)
add_docstr_all(
"hypot_",
r"""
hypot_(other) -> Tensor
In-place version of :meth:`~Tensor.hypot`
""",
)
add_docstr_all(
"i0",
r"""
i0() -> Tensor
See :func:`torch.i0`
""",
)
add_docstr_all(
"i0_",
r"""
i0_() -> Tensor
In-place version of :meth:`~Tensor.i0`
""",
)
add_docstr_all(
"igamma",
r"""
igamma(other) -> Tensor
See :func:`torch.igamma`
""",
)
add_docstr_all(
"igamma_",
r"""
igamma_(other) -> Tensor
In-place version of :meth:`~Tensor.igamma`
""",
)
add_docstr_all(
"igammac",
r"""
igammac(other) -> Tensor
See :func:`torch.igammac`
""",
)
add_docstr_all(
"igammac_",
r"""
igammac_(other) -> Tensor
In-place version of :meth:`~Tensor.igammac`
""",
)
add_docstr_all(
"indices",
r"""
indices() -> Tensor
Return the indices tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
.. warning::
Throws an error if :attr:`self` is not a sparse COO tensor.
See also :meth:`Tensor.values`.
.. note::
This method can only be called on a coalesced sparse tensor. See
:meth:`Tensor.coalesce` for details.
""",
)
add_docstr_all(
"get_device",
r"""
get_device() -> Device ordinal (Integer)
For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
For CPU tensors, an error is thrown.
Example::
>>> x = torch.randn(3, 4, 5, device='cuda:0')
>>> x.get_device()
0
>>> x.cpu().get_device() # RuntimeError: get_device is not implemented for type torch.FloatTensor
""",
)
add_docstr_all(
"values",
r"""
values() -> Tensor
Return the values tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.
.. warning::
Throws an error if :attr:`self` is not a sparse COO tensor.
See also :meth:`Tensor.indices`.
.. note::
This method can only be called on a coalesced sparse tensor. See
:meth:`Tensor.coalesce` for details.
""",
)
add_docstr_all(
"gt",
r"""
gt(other) -> Tensor
See :func:`torch.gt`.
""",
)
add_docstr_all(
"gt_",
r"""
gt_(other) -> Tensor
In-place version of :meth:`~Tensor.gt`.
""",
)
add_docstr_all(
"greater",
r"""
greater(other) -> Tensor
See :func:`torch.greater`.
""",
)
add_docstr_all(
"greater_",
r"""
greater_(other) -> Tensor
In-place version of :meth:`~Tensor.greater`.
""",
)
add_docstr_all(
"has_names",
r"""
Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.
""",
)
add_docstr_all(
"hardshrink",
r"""
hardshrink(lambd=0.5) -> Tensor
See :func:`torch.nn.functional.hardshrink`
""",
)
add_docstr_all(
"heaviside",
r"""
heaviside(values) -> Tensor
See :func:`torch.heaviside`
""",
)
add_docstr_all(
"heaviside_",
r"""
heaviside_(values) -> Tensor
In-place version of :meth:`~Tensor.heaviside`
""",
)
add_docstr_all(
"histc",
r"""
histc(bins=100, min=0, max=0) -> Tensor
See :func:`torch.histc`
""",
)
add_docstr_all(
"histogram",
r"""
histogram(bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)
See :func:`torch.histogram`
""",
)
add_docstr_all(
"index_add_",
r"""
index_add_(dim, index, source, *, alpha=1) -> Tensor
Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
tensor by adding to the indices in the order given in :attr:`index`. For example,
if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
``source`` is subtracted from the ``j``\ th row of :attr:`self`.
The :attr:`dim`\ th dimension of ``source`` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.
For a 3-D tensor the output is given as::
self[index[i], :, :] += alpha * src[i, :, :] # if dim == 0
self[:, index[i], :] += alpha * src[:, i, :] # if dim == 1
self[:, :, index[i]] += alpha * src[:, :, i] # if dim == 2
Note:
{forward_reproducibility_note}
Args:
dim (int): dimension along which to index
index (Tensor): indices of ``source`` to select from,
should have dtype either `torch.int64` or `torch.int32`
source (Tensor): the tensor containing values to add
Keyword args:
alpha (Number): the scalar multiplier for ``source``
Example::
>>> x = torch.ones(5, 3)
>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 4, 2])
>>> x.index_add_(0, index, t)
tensor([[ 2., 3., 4.],
[ 1., 1., 1.],
[ 8., 9., 10.],
[ 1., 1., 1.],
[ 5., 6., 7.]])
>>> x.index_add_(0, index, t, alpha=-1)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]])
""".format(
**reproducibility_notes
),
)
add_docstr_all(
"index_copy_",
r"""
index_copy_(dim, index, tensor) -> Tensor
Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
the indices in the order given in :attr:`index`. For example, if ``dim == 0``
and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
``j``\ th row of :attr:`self`.
The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.
.. note::
If :attr:`index` contains duplicate entries, multiple elements from
:attr:`tensor` will be copied to the same index of :attr:`self`. The result
is nondeterministic since it depends on which copy occurs last.
Args:
dim (int): dimension along which to index
index (LongTensor): indices of :attr:`tensor` to select from
tensor (Tensor): the tensor containing values to copy
Example::
>>> x = torch.zeros(5, 3)
>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 4, 2])
>>> x.index_copy_(0, index, t)
tensor([[ 1., 2., 3.],
[ 0., 0., 0.],
[ 7., 8., 9.],
[ 0., 0., 0.],
[ 4., 5., 6.]])
""",
)
add_docstr_all(
"index_fill_",
r"""
index_fill_(dim, index, value) -> Tensor
Fills the elements of the :attr:`self` tensor with value :attr:`value` by
selecting the indices in the order given in :attr:`index`.
Args:
dim (int): dimension along which to index
index (LongTensor): indices of :attr:`self` tensor to fill in
value (float): the value to fill with
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
>>> index = torch.tensor([0, 2])
>>> x.index_fill_(1, index, -1)
tensor([[-1., 2., -1.],
[-1., 5., -1.],
[-1., 8., -1.]])
""",
)
add_docstr_all(
"index_put_",
r"""
index_put_(indices, values, accumulate=False) -> Tensor
Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
the indices specified in :attr:`indices` (which is a tuple of Tensors). The
expression ``tensor.index_put_(indices, values)`` is equivalent to
``tensor[indices] = values``. Returns :attr:`self`.
If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
contain duplicate elements.
Args:
indices (tuple of LongTensor): tensors used to index into `self`.
values (Tensor): tensor of same dtype as `self`.
accumulate (bool): whether to accumulate into self
""",
)
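# Minimal sketch of ``index_put_`` with a tuple of index tensors (illustrative
# helper only; the name and values are assumptions, not library code).
def _example_index_put_():
    import torch
    x = torch.zeros(3, 3)
    rows = torch.tensor([0, 2])
    cols = torch.tensor([1, 1])
    x.index_put_((rows, cols), torch.tensor([5.0, 7.0]))   # writes x[0, 1] and x[2, 1]
    return x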
add_docstr_all(
"index_put",
r"""
index_put(indices, values, accumulate=False) -> Tensor
Out-of-place version of :meth:`~Tensor.index_put_`.
""",
)
add_docstr_all(
"index_reduce_",
r"""
index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor
Accumulate the elements of ``source`` into the :attr:`self`
tensor by accumulating to the indices in the order given in :attr:`index`
using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``,
``index[i] == j``, ``reduce == prod`` and ``include_self == True`` then the ``i``\ th
row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If
:obj:`include_self=True`, the values in the :attr:`self` tensor are included
in the reduction; otherwise, rows in the :attr:`self` tensor that are accumulated
to are treated as if they were filled with the reduction identities.
The :attr:`dim`\ th dimension of ``source`` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.
For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the
output is given as::
self[index[i], :, :] *= src[i, :, :] # if dim == 0
self[:, index[i], :] *= src[:, i, :] # if dim == 1
self[:, :, index[i]] *= src[:, :, i] # if dim == 2
Note:
{forward_reproducibility_note}
.. note::
This function only supports floating point tensors.
.. warning::
This function is in beta and may change in the near future.
Args:
dim (int): dimension along which to index
index (Tensor): indices of ``source`` to select from,
should have dtype either `torch.int64` or `torch.int32`
source (FloatTensor): the tensor containing values to accumulate
reduce (str): the reduction operation to apply
(:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
Keyword args:
include_self (bool): whether the elements from the ``self`` tensor are
included in the reduction
Example::
>>> x = torch.empty(5, 3).fill_(2)
>>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float)
>>> index = torch.tensor([0, 4, 2, 0])
>>> x.index_reduce_(0, index, t, 'prod')
tensor([[20., 44., 72.],
[ 2., 2., 2.],
[14., 16., 18.],
[ 2., 2., 2.],
[ 8., 10., 12.]])
>>> x = torch.empty(5, 3).fill_(2)
>>> x.index_reduce_(0, index, t, 'prod', include_self=False)
tensor([[10., 22., 36.],
[ 2., 2., 2.],
[ 7., 8., 9.],
[ 2., 2., 2.],
[ 4., 5., 6.]])
""".format(
**reproducibility_notes
),
)
add_docstr_all(
"index_select",
r"""
index_select(dim, index) -> Tensor
See :func:`torch.index_select`
""",
)
add_docstr_all(
"sparse_mask",
r"""
sparse_mask(mask) -> Tensor
Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
strided tensor :attr:`self` filtered by the indices of the sparse
tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
ignored. :attr:`self` and :attr:`mask` tensors must have the same
shape.
.. note::
The returned sparse tensor has the same indices as the sparse tensor
:attr:`mask`, even when the corresponding values in :attr:`self` are
zeros.
Args:
mask (Tensor): a sparse tensor whose indices are used as a filter
Example::
>>> nse = 5
>>> dims = (5, 5, 2, 2)
>>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
... torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
>>> V = torch.randn(nse, dims[2], dims[3])
>>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
>>> D = torch.randn(dims)
>>> D.sparse_mask(S)
tensor(indices=tensor([[0, 0, 0, 2],
[0, 1, 4, 3]]),
values=tensor([[[ 1.6550, 0.2397],
[-0.1611, -0.0779]],
[[ 0.2326, -1.0558],
[ 1.4711, 1.9678]],
[[-0.5138, -0.0411],
[ 1.9417, 0.5158]],
[[ 0.0793, 0.0036],
[-0.2569, -0.1055]]]),
size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
""",
)
add_docstr_all(
"inverse",
r"""
inverse() -> Tensor
See :func:`torch.inverse`
""",
)
add_docstr_all(
"isnan",
r"""
isnan() -> Tensor
See :func:`torch.isnan`
""",
)
add_docstr_all(
"isinf",
r"""
isinf() -> Tensor
See :func:`torch.isinf`
""",
)
add_docstr_all(
"isposinf",
r"""
isposinf() -> Tensor
See :func:`torch.isposinf`
""",
)
add_docstr_all(
"isneginf",
r"""
isneginf() -> Tensor
See :func:`torch.isneginf`
""",
)
add_docstr_all(
"isfinite",
r"""
isfinite() -> Tensor
See :func:`torch.isfinite`
""",
)
add_docstr_all(
"isclose",
r"""
isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
See :func:`torch.isclose`
""",
)
add_docstr_all(
"isreal",
r"""
isreal() -> Tensor
See :func:`torch.isreal`
""",
)
add_docstr_all(
"is_coalesced",
r"""
is_coalesced() -> bool
Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
<sparse-coo-docs>` that is coalesced, ``False`` otherwise.
.. warning::
Throws an error if :attr:`self` is not a sparse COO tensor.
See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.
""",
)
add_docstr_all(
"is_contiguous",
r"""
is_contiguous(memory_format=torch.contiguous_format) -> bool
Returns True if :attr:`self` tensor is contiguous in memory in the order specified
by memory format.
Args:
memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
order. Default: ``torch.contiguous_format``.
""",
)
add_docstr_all(
"is_pinned",
r"""
Returns ``True`` if this tensor resides in pinned memory.
""",
)
add_docstr_all(
"is_floating_point",
r"""
is_floating_point() -> bool
Returns True if the data type of :attr:`self` is a floating point data type.
""",
)
add_docstr_all(
"is_complex",
r"""
is_complex() -> bool
Returns True if the data type of :attr:`self` is a complex data type.
""",
)
add_docstr_all(
"is_inference",
r"""
is_inference() -> bool
See :func:`torch.is_inference`
""",
)
add_docstr_all(
"is_conj",
r"""
is_conj() -> bool
Returns True if the conjugate bit of :attr:`self` is set to true.
""",
)
add_docstr_all(
"is_neg",
r"""
is_neg() -> bool
Returns True if the negative bit of :attr:`self` is set to true.
""",
)
add_docstr_all(
"is_signed",
r"""
is_signed() -> bool
Returns True if the data type of :attr:`self` is a signed data type.
""",
)
add_docstr_all(
"is_set_to",
r"""
is_set_to(tensor) -> bool
Returns True if both tensors are pointing to the exact same memory (same
storage, offset, size and stride).
""",
)
add_docstr_all(
"item",
r"""
item() -> number
Returns the value of this tensor as a standard Python number. This only works
for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.
This operation is not differentiable.
Example::
>>> x = torch.tensor([1.0])
>>> x.item()
1.0
""",
)
add_docstr_all(
"kron",
r"""
kron(other) -> Tensor
See :func:`torch.kron`
""",
)
add_docstr_all(
"kthvalue",
r"""
kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.kthvalue`
""",
)
add_docstr_all(
"ldexp",
r"""
ldexp(other) -> Tensor
See :func:`torch.ldexp`
""",
)
add_docstr_all(
"ldexp_",
r"""
ldexp_(other) -> Tensor
In-place version of :meth:`~Tensor.ldexp`
""",
)
add_docstr_all(
"lcm",
r"""
lcm(other) -> Tensor
See :func:`torch.lcm`
""",
)
add_docstr_all(
"lcm_",
r"""
lcm_(other) -> Tensor
In-place version of :meth:`~Tensor.lcm`
""",
)
add_docstr_all(
"le",
r"""
le(other) -> Tensor
See :func:`torch.le`.
""",
)
add_docstr_all(
"le_",
r"""
le_(other) -> Tensor
In-place version of :meth:`~Tensor.le`.
""",
)
add_docstr_all(
"less_equal",
r"""
less_equal(other) -> Tensor
See :func:`torch.less_equal`.
""",
)
add_docstr_all(
"less_equal_",
r"""
less_equal_(other) -> Tensor
In-place version of :meth:`~Tensor.less_equal`.
""",
)
add_docstr_all(
"lerp",
r"""
lerp(end, weight) -> Tensor
See :func:`torch.lerp`
""",
)
add_docstr_all(
"lerp_",
r"""
lerp_(end, weight) -> Tensor
In-place version of :meth:`~Tensor.lerp`
""",
)
add_docstr_all(
"lgamma",
r"""
lgamma() -> Tensor
See :func:`torch.lgamma`
""",
)
add_docstr_all(
"lgamma_",
r"""
lgamma_() -> Tensor
In-place version of :meth:`~Tensor.lgamma`
""",
)
add_docstr_all(
"log",
r"""
log() -> Tensor
See :func:`torch.log`
""",
)
add_docstr_all(
"log_",
r"""
log_() -> Tensor
In-place version of :meth:`~Tensor.log`
""",
)
add_docstr_all(
"log10",
r"""
log10() -> Tensor
See :func:`torch.log10`
""",
)
add_docstr_all(
"log10_",
r"""
log10_() -> Tensor
In-place version of :meth:`~Tensor.log10`
""",
)
add_docstr_all(
"log1p",
r"""
log1p() -> Tensor
See :func:`torch.log1p`
""",
)
add_docstr_all(
"log1p_",
r"""
log1p_() -> Tensor
In-place version of :meth:`~Tensor.log1p`
""",
)
add_docstr_all(
"log2",
r"""
log2() -> Tensor
See :func:`torch.log2`
""",
)
add_docstr_all(
"log2_",
r"""
log2_() -> Tensor
In-place version of :meth:`~Tensor.log2`
""",
)
add_docstr_all(
"logaddexp",
r"""
logaddexp(other) -> Tensor
See :func:`torch.logaddexp`
""",
)
add_docstr_all(
"logaddexp2",
r"""
logaddexp2(other) -> Tensor
See :func:`torch.logaddexp2`
""",
)
add_docstr_all(
"log_normal_",
r"""
log_normal_(mean=1, std=2, *, generator=None)
Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
parameterized by the given mean :math:`\mu` and standard deviation
:math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
standard deviation of the underlying normal distribution, and not of the
returned distribution:
.. math::
f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
""",
)
add_docstr_all(
"logsumexp",
r"""
logsumexp(dim, keepdim=False) -> Tensor
See :func:`torch.logsumexp`
""",
)
add_docstr_all(
"lstsq",
r"""
lstsq(A) -> (Tensor, Tensor)
See :func:`torch.lstsq`
""",
)
add_docstr_all(
"lt",
r"""
lt(other) -> Tensor
See :func:`torch.lt`.
""",
)
add_docstr_all(
"lt_",
r"""
lt_(other) -> Tensor
In-place version of :meth:`~Tensor.lt`.
""",
)
add_docstr_all(
"less",
r"""
less(other) -> Tensor
See :func:`torch.less`.
""",
)
add_docstr_all(
"less_",
r"""
less_(other) -> Tensor
In-place version of :meth:`~Tensor.less`.
""",
)
add_docstr_all(
"lu_solve",
r"""
lu_solve(LU_data, LU_pivots) -> Tensor
See :func:`torch.lu_solve`
""",
)
add_docstr_all(
"map_",
r"""
map_(tensor, callable)
Applies :attr:`callable` for each element in :attr:`self` tensor and the given
:attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.
The :attr:`callable` should have the signature::
def callable(a, b) -> number
""",
)
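# Hedged example of the elementwise callable described above (hypothetical
# helper; note that ``map_`` is a CPU-only Tensor method).
def _example_map_():
    import torch
    a = torch.tensor([1.0, 2.0, 3.0])
    b = torch.tensor([10.0, 20.0, 30.0])
    a.map_(b, lambda x, y: x * y)   # a becomes [10., 40., 90.]
    return a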
add_docstr_all(
"masked_scatter_",
r"""
masked_scatter_(mask, source)
Copies elements from :attr:`source` into :attr:`self` tensor at positions where
the :attr:`mask` is True.
The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
with the shape of the underlying tensor. The :attr:`source` should have at least
as many elements as the number of ones in :attr:`mask`
Args:
mask (BoolTensor): the boolean mask
source (Tensor): the tensor to copy from
.. note::
The :attr:`mask` operates on the :attr:`self` tensor, not on the given
:attr:`source` tensor.
""",
)
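# Sketch (assumed helper, not executed at import): source elements are consumed
# in order and written where the mask is True.
def _example_masked_scatter_():
    import torch
    x = torch.zeros(2, 3)
    mask = torch.tensor([[True, False, True], [False, True, False]])
    x.masked_scatter_(mask, torch.tensor([1.0, 2.0, 3.0]))
    return x   # [[1., 0., 2.], [0., 3., 0.]]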
add_docstr_all(
"masked_fill_",
r"""
masked_fill_(mask, value)
Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
True. The shape of :attr:`mask` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.
Args:
mask (BoolTensor): the boolean mask
value (float): the value to fill in with
""",
)
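# Small illustrative helper (name is made up): fill positions selected by a
# boolean mask with a scalar value.
def _example_masked_fill_():
    import torch
    x = torch.arange(6.0).reshape(2, 3)
    x.masked_fill_(x > 3, -1.0)   # mask has the same (broadcastable) shape as x
    return x   # [[0., 1., 2.], [3., -1., -1.]]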
add_docstr_all(
"masked_select",
r"""
masked_select(mask) -> Tensor
See :func:`torch.masked_select`
""",
)
add_docstr_all(
"matrix_power",
r"""
matrix_power(n) -> Tensor
.. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.
Alias for :func:`torch.linalg.matrix_power`
""",
)
add_docstr_all(
"matrix_exp",
r"""
matrix_exp() -> Tensor
See :func:`torch.matrix_exp`
""",
)
add_docstr_all(
"max",
r"""
max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
See :func:`torch.max`
""",
)
add_docstr_all(
"amax",
r"""
amax(dim=None, keepdim=False) -> Tensor
See :func:`torch.amax`
""",
)
add_docstr_all(
"maximum",
r"""
maximum(other) -> Tensor
See :func:`torch.maximum`
""",
)
add_docstr_all(
"fmax",
r"""
fmax(other) -> Tensor
See :func:`torch.fmax`
""",
)
add_docstr_all(
"argmax",
r"""
argmax(dim=None, keepdim=False) -> LongTensor
See :func:`torch.argmax`
""",
)
add_docstr_all(
"argwhere",
r"""
argwhere() -> Tensor
See :func:`torch.argwhere`
""",
)
add_docstr_all(
"mean",
r"""
mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
See :func:`torch.mean`
""",
)
add_docstr_all(
"nanmean",
r"""
nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor
See :func:`torch.nanmean`
""",
)
add_docstr_all(
"median",
r"""
median(dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.median`
""",
)
add_docstr_all(
"nanmedian",
r"""
nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.nanmedian`
""",
)
add_docstr_all(
"min",
r"""
min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
See :func:`torch.min`
""",
)
add_docstr_all(
"amin",
r"""
amin(dim=None, keepdim=False) -> Tensor
See :func:`torch.amin`
""",
)
add_docstr_all(
"minimum",
r"""
minimum(other) -> Tensor
See :func:`torch.minimum`
""",
)
add_docstr_all(
"aminmax",
r"""
aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)
See :func:`torch.aminmax`
""",
)
add_docstr_all(
"fmin",
r"""
fmin(other) -> Tensor
See :func:`torch.fmin`
""",
)
add_docstr_all(
"argmin",
r"""
argmin(dim=None, keepdim=False) -> LongTensor
See :func:`torch.argmin`
""",
)
add_docstr_all(
"mm",
r"""
mm(mat2) -> Tensor
See :func:`torch.mm`
""",
)
add_docstr_all(
"mode",
r"""
mode(dim=None, keepdim=False) -> (Tensor, LongTensor)
See :func:`torch.mode`
""",
)
add_docstr_all(
"movedim",
r"""
movedim(source, destination) -> Tensor
See :func:`torch.movedim`
""",
)
add_docstr_all(
"moveaxis",
r"""
moveaxis(source, destination) -> Tensor
See :func:`torch.moveaxis`
""",
)
add_docstr_all(
"mul",
r"""
mul(value) -> Tensor
See :func:`torch.mul`.
""",
)
add_docstr_all(
"mul_",
r"""
mul_(value) -> Tensor
In-place version of :meth:`~Tensor.mul`.
""",
)
add_docstr_all(
"multiply",
r"""
multiply(value) -> Tensor
See :func:`torch.multiply`.
""",
)
add_docstr_all(
"multiply_",
r"""
multiply_(value) -> Tensor
In-place version of :meth:`~Tensor.multiply`.
""",
)
add_docstr_all(
"multinomial",
r"""
multinomial(num_samples, replacement=False, *, generator=None) -> Tensor
See :func:`torch.multinomial`
""",
)
add_docstr_all(
"mv",
r"""
mv(vec) -> Tensor
See :func:`torch.mv`
""",
)
add_docstr_all(
"mvlgamma",
r"""
mvlgamma(p) -> Tensor
See :func:`torch.mvlgamma`
""",
)
add_docstr_all(
"mvlgamma_",
r"""
mvlgamma_(p) -> Tensor
In-place version of :meth:`~Tensor.mvlgamma`
""",
)
add_docstr_all(
"narrow",
r"""
narrow(dimension, start, length) -> Tensor
See :func:`torch.narrow`
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> x.narrow(0, 0, 2)
tensor([[ 1, 2, 3],
[ 4, 5, 6]])
>>> x.narrow(1, 1, 2)
tensor([[ 2, 3],
[ 5, 6],
[ 8, 9]])
""",
)
add_docstr_all(
"narrow_copy",
r"""
narrow_copy(dimension, start, length) -> Tensor
Same as :meth:`Tensor.narrow` except returning a copy rather
than shared storage. This is primarily for sparse tensors, which
do not have a shared-storage narrow method. Calling ``narrow_copy``
with ``dimension > self.sparse_dim()`` will return a copy with the
relevant dense dimension narrowed, and ``self.shape`` updated accordingly.
""",
)
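# Sketch contrasting ``narrow_copy`` with ``narrow`` (hypothetical helper): the
# result owns its own storage, so writes to it do not affect the original.
def _example_narrow_copy():
    import torch
    x = torch.arange(9).reshape(3, 3)
    n = x.narrow_copy(0, 1, 2)   # rows 1..2, copied rather than viewed
    n[0, 0] = -1                 # x is unchanged, unlike with narrow()
    return x, n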
add_docstr_all(
"ndimension",
r"""
ndimension() -> int
Alias for :meth:`~Tensor.dim()`
""",
)
add_docstr_all(
"nan_to_num",
r"""
nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor
See :func:`torch.nan_to_num`.
""",
)
add_docstr_all(
"nan_to_num_",
r"""
nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor
In-place version of :meth:`~Tensor.nan_to_num`.
""",
)
add_docstr_all(
"ne",
r"""
ne(other) -> Tensor
See :func:`torch.ne`.
""",
)
add_docstr_all(
"ne_",
r"""
ne_(other) -> Tensor
In-place version of :meth:`~Tensor.ne`.
""",
)
add_docstr_all(
"not_equal",
r"""
not_equal(other) -> Tensor
See :func:`torch.not_equal`.
""",
)
add_docstr_all(
"not_equal_",
r"""
not_equal_(other) -> Tensor
In-place version of :meth:`~Tensor.not_equal`.
""",
)
add_docstr_all(
"neg",
r"""
neg() -> Tensor
See :func:`torch.neg`
""",
)
add_docstr_all(
"negative",
r"""
negative() -> Tensor
See :func:`torch.negative`
""",
)
add_docstr_all(
"neg_",
r"""
neg_() -> Tensor
In-place version of :meth:`~Tensor.neg`
""",
)
add_docstr_all(
"negative_",
r"""
negative_() -> Tensor
In-place version of :meth:`~Tensor.negative`
""",
)
add_docstr_all(
"nelement",
r"""
nelement() -> int
Alias for :meth:`~Tensor.numel`
""",
)
add_docstr_all(
"nextafter",
r"""
nextafter(other) -> Tensor
See :func:`torch.nextafter`
""",
)
add_docstr_all(
"nextafter_",
r"""
nextafter_(other) -> Tensor
In-place version of :meth:`~Tensor.nextafter`
""",
)
add_docstr_all(
"nonzero",
r"""
nonzero() -> LongTensor
See :func:`torch.nonzero`
""",
)
add_docstr_all(
"norm",
r"""
norm(p=2, dim=None, keepdim=False) -> Tensor
See :func:`torch.norm`
""",
)
add_docstr_all(
"normal_",
r"""
normal_(mean=0, std=1, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements sampled from the normal distribution
parameterized by :attr:`mean` and :attr:`std`.
""",
)
add_docstr_all(
"numel",
r"""
numel() -> int
See :func:`torch.numel`
""",
)
add_docstr_all(
"numpy",
r"""
numpy(*, force=False) -> numpy.ndarray
Returns the tensor as a NumPy :class:`ndarray`.
If :attr:`force` is ``False`` (the default), the conversion
is performed only if the tensor is on the CPU, does not require grad,
does not have its conjugate bit set, and is a dtype and layout that
NumPy supports. The returned ndarray and the tensor will share their
storage, so changes to the tensor will be reflected in the ndarray
and vice versa.
If :attr:`force` is ``True`` this is equivalent to
calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``.
If the tensor isn't on the CPU or the conjugate or negative bit is set,
the tensor won't share its storage with the returned ndarray.
Setting :attr:`force` to ``True`` can be a useful shorthand.
Args:
force (bool): if ``True``, the ndarray may be a copy of the tensor
instead of always sharing memory, defaults to ``False``.
""",
)
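# Memory-sharing sketch for ``numpy()`` with the default ``force=False``
# (illustrative helper only; assumes a CPU tensor without grad/conj/neg bits).
def _example_numpy():
    import torch
    t = torch.tensor([1.0, 2.0, 3.0])
    a = t.numpy()    # shares storage with t
    a[0] = 9.0
    return t         # tensor([9., 2., 3.]) -- the write is visible through t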
add_docstr_all(
"orgqr",
r"""
orgqr(input2) -> Tensor
See :func:`torch.orgqr`
""",
)
add_docstr_all(
"ormqr",
r"""
ormqr(input2, input3, left=True, transpose=False) -> Tensor
See :func:`torch.ormqr`
""",
)
add_docstr_all(
"permute",
r"""
permute(*dims) -> Tensor
See :func:`torch.permute`
""",
)
add_docstr_all(
"polygamma",
r"""
polygamma(n) -> Tensor
See :func:`torch.polygamma`
""",
)
add_docstr_all(
"polygamma_",
r"""
polygamma_(n) -> Tensor
In-place version of :meth:`~Tensor.polygamma`
""",
)
add_docstr_all(
"positive",
r"""
positive() -> Tensor
See :func:`torch.positive`
""",
)
add_docstr_all(
"pow",
r"""
pow(exponent) -> Tensor
See :func:`torch.pow`
""",
)
add_docstr_all(
"pow_",
r"""
pow_(exponent) -> Tensor
In-place version of :meth:`~Tensor.pow`
""",
)
add_docstr_all(
"float_power",
r"""
float_power(exponent) -> Tensor
See :func:`torch.float_power`
""",
)
add_docstr_all(
"float_power_",
r"""
float_power_(exponent) -> Tensor
In-place version of :meth:`~Tensor.float_power`
""",
)
add_docstr_all(
"prod",
r"""
prod(dim=None, keepdim=False, dtype=None) -> Tensor
See :func:`torch.prod`
""",
)
add_docstr_all(
"put_",
r"""
put_(index, source, accumulate=False) -> Tensor
Copies the elements from :attr:`source` into the positions specified by
:attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
it were a 1-D tensor.
:attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
the same shape.
If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if :attr:`index`
contains duplicate elements.
Args:
index (LongTensor): the indices into self
source (Tensor): the tensor containing values to copy from
accumulate (bool): whether to accumulate into self
Example::
>>> src = torch.tensor([[4, 3, 5],
... [6, 7, 8]])
>>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
tensor([[ 4, 9, 5],
[ 10, 7, 8]])
""",
)
add_docstr_all(
"put",
r"""
put(input, index, source, accumulate=False) -> Tensor
Out-of-place version of :meth:`torch.Tensor.put_`.
`input` corresponds to `self` in :meth:`torch.Tensor.put_`.
""",
)
add_docstr_all(
"qr",
r"""
qr(some=True) -> (Tensor, Tensor)
See :func:`torch.qr`
""",
)
add_docstr_all(
"qscheme",
r"""
qscheme() -> torch.qscheme
Returns the quantization scheme of a given QTensor.
""",
)
add_docstr_all(
"quantile",
r"""
quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
See :func:`torch.quantile`
""",
)
add_docstr_all(
"nanquantile",
r"""
nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor
See :func:`torch.nanquantile`
""",
)
add_docstr_all(
"q_scale",
r"""
q_scale() -> float
Given a Tensor quantized by linear(affine) quantization,
returns the scale of the underlying quantizer().
""",
)
add_docstr_all(
"q_zero_point",
r"""
q_zero_point() -> int
Given a Tensor quantized by linear(affine) quantization,
returns the zero_point of the underlying quantizer().
""",
)
add_docstr_all(
"q_per_channel_scales",
r"""
q_per_channel_scales() -> Tensor
Given a Tensor quantized by linear (affine) per-channel quantization,
returns a Tensor of scales of the underlying quantizer. It has the number of
elements that matches the corresponding dimensions (from q_per_channel_axis) of
the tensor.
""",
)
add_docstr_all(
"q_per_channel_zero_points",
r"""
q_per_channel_zero_points() -> Tensor
Given a Tensor quantized by linear (affine) per-channel quantization,
returns a tensor of zero_points of the underlying quantizer. It has the number of
elements that matches the corresponding dimensions (from q_per_channel_axis) of
the tensor.
""",
)
add_docstr_all(
"q_per_channel_axis",
r"""
q_per_channel_axis() -> int
Given a Tensor quantized by linear (affine) per-channel quantization,
returns the index of dimension on which per-channel quantization is applied.
""",
)
add_docstr_all(
"random_",
r"""
random_(from=0, to=None, *, generator=None) -> Tensor
Fills :attr:`self` tensor with numbers sampled from the discrete uniform
distribution over ``[from, to - 1]``. If not specified, the values are usually
only bounded by :attr:`self` tensor's data type. However, for floating point
types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
will be uniform in ``[0, 2^53]``.
""",
)
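# Sketch of the integer range ``[from, to - 1]`` described above (assumed
# helper): with from=0 and to=10 every sampled value lies in [0, 9].
def _example_random_():
    import torch
    x = torch.empty(5, dtype=torch.int64).random_(0, 10)
    assert ((x >= 0) & (x <= 9)).all()
    return x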
add_docstr_all(
"rad2deg",
r"""
rad2deg() -> Tensor
See :func:`torch.rad2deg`
""",
)
add_docstr_all(
"rad2deg_",
r"""
rad2deg_() -> Tensor
In-place version of :meth:`~Tensor.rad2deg`
""",
)
add_docstr_all(
"deg2rad",
r"""
deg2rad() -> Tensor
See :func:`torch.deg2rad`
""",
)
add_docstr_all(
"deg2rad_",
r"""
deg2rad_() -> Tensor
In-place version of :meth:`~Tensor.deg2rad`
""",
)
add_docstr_all(
"ravel",
r"""
ravel() -> Tensor
see :func:`torch.ravel`
""",
)
add_docstr_all(
"reciprocal",
r"""
reciprocal() -> Tensor
See :func:`torch.reciprocal`
""",
)
add_docstr_all(
"reciprocal_",
r"""
reciprocal_() -> Tensor
In-place version of :meth:`~Tensor.reciprocal`
""",
)
add_docstr_all(
"record_stream",
r"""
record_stream(stream)
Ensures that the tensor memory is not reused for another tensor until all
current work queued on :attr:`stream` is complete.
.. note::
The caching allocator is aware of only the stream where a tensor was
allocated. Due to the awareness, it already correctly manages the life
cycle of tensors on only one stream. But if a tensor is used on a stream
different from the stream of origin, the allocator might reuse the memory
unexpectedly. Calling this method lets the allocator know which streams
have used the tensor.
""",
)
add_docstr_all(
"remainder",
r"""
remainder(divisor) -> Tensor
See :func:`torch.remainder`
""",
)
add_docstr_all(
"remainder_",
r"""
remainder_(divisor) -> Tensor
In-place version of :meth:`~Tensor.remainder`
""",
)
add_docstr_all(
"renorm",
r"""
renorm(p, dim, maxnorm) -> Tensor
See :func:`torch.renorm`
""",
)
add_docstr_all(
"renorm_",
r"""
renorm_(p, dim, maxnorm) -> Tensor
In-place version of :meth:`~Tensor.renorm`
""",
)
add_docstr_all(
"repeat",
r"""
repeat(*sizes) -> Tensor
Repeats this tensor along the specified dimensions.
Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.
.. warning::
:meth:`~Tensor.repeat` behaves differently from
`numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
but is more similar to
`numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.
Args:
sizes (torch.Size or int...): The number of times to repeat this tensor along each
dimension
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.repeat(4, 2)
tensor([[ 1, 2, 3, 1, 2, 3],
[ 1, 2, 3, 1, 2, 3],
[ 1, 2, 3, 1, 2, 3],
[ 1, 2, 3, 1, 2, 3]])
>>> x.repeat(4, 2, 1).size()
torch.Size([4, 2, 3])
""",
)
add_docstr_all(
"repeat_interleave",
r"""
repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor
See :func:`torch.repeat_interleave`.
""",
)
add_docstr_all(
"requires_grad_",
r"""
requires_grad_(requires_grad=True) -> Tensor
Change if autograd should record operations on this tensor: sets this tensor's
:attr:`requires_grad` attribute in-place. Returns this tensor.
:func:`requires_grad_`'s main use case is to tell autograd to begin recording
operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
(because it was obtained through a DataLoader, or required preprocessing or
initialization), ``tensor.requires_grad_()`` makes it so that autograd will
begin to record operations on ``tensor``.
Args:
requires_grad (bool): If autograd should record operations on this tensor.
Default: ``True``.
Example::
>>> # Let's say we want to preprocess some saved weights and use
>>> # the result as new weights.
>>> saved_weights = [0.1, 0.2, 0.3, 0.25]
>>> loaded_weights = torch.tensor(saved_weights)
>>> weights = preprocess(loaded_weights) # some function
>>> weights
tensor([-0.5503, 0.4926, -2.1158, -0.8303])
>>> # Now, start to record operations done to weights
>>> weights.requires_grad_()
>>> out = weights.pow(2).sum()
>>> out.backward()
>>> weights.grad
tensor([-1.1007, 0.9853, -4.2316, -1.6606])
""",
)
add_docstr_all(
"reshape",
r"""
reshape(*shape) -> Tensor
Returns a tensor with the same data and number of elements as :attr:`self`
but with the specified shape. This method returns a view if :attr:`shape` is
compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
possible to return a view.
See :func:`torch.reshape`
Args:
shape (tuple of ints or int...): the desired shape
""",
)
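# View-vs-copy sketch for ``reshape`` (hypothetical helper): a contiguous input
# comes back as a view of the same storage, while a transposed input forces a copy.
def _example_reshape():
    import torch
    x = torch.arange(6.0)
    v = x.reshape(2, 3)
    shares = v.data_ptr() == x.data_ptr()     # True: v is a view of x
    c = x.reshape(2, 3).t().reshape(6)        # non-contiguous input -> copied data
    return shares, c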
add_docstr_all(
"reshape_as",
r"""
reshape_as(other) -> Tensor
Returns this tensor as the same shape as :attr:`other`.
``self.reshape_as(other)`` is equivalent to ``self.reshape(other.sizes())``.
This method returns a view if ``other.sizes()`` is compatible with the current
shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.
Please see :meth:`reshape` for more information about ``reshape``.
Args:
other (:class:`torch.Tensor`): The result tensor has the same shape
as :attr:`other`.
""",
)
add_docstr_all(
"resize_",
r"""
resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor
Resizes :attr:`self` tensor to the specified size. If the number of elements is
larger than the current storage size, then the underlying storage is resized
to fit the new number of elements. If the number of elements is smaller, the
underlying storage is not changed. Existing elements are preserved but any new
memory is uninitialized.
.. warning::
This is a low-level method. The storage is reinterpreted as C-contiguous,
ignoring the current strides (unless the target size equals the current
size, in which case the tensor is left unchanged). For most purposes, you
will instead want to use :meth:`~Tensor.view()`, which checks for
contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
change the size in-place with custom strides, see :meth:`~Tensor.set_()`.
Args:
sizes (torch.Size or int...): the desired size
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
Tensor. Default: ``torch.contiguous_format``. Note that memory format of
:attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``.
Example::
>>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
>>> x.resize_(2, 2)
tensor([[ 1, 2],
[ 3, 4]])
""",
)
add_docstr_all(
"resize_as_",
r"""
resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor
Resizes the :attr:`self` tensor to be the same size as the specified
:attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.
Args:
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
Tensor. Default: ``torch.contiguous_format``. Note that memory format of
:attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``.
""",
)
add_docstr_all(
"rot90",
r"""
rot90(k, dims) -> Tensor
See :func:`torch.rot90`
""",
)
add_docstr_all(
"round",
r"""
round(decimals=0) -> Tensor
See :func:`torch.round`
""",
)
add_docstr_all(
"round_",
r"""
round_(decimals=0) -> Tensor
In-place version of :meth:`~Tensor.round`
""",
)
add_docstr_all(
"rsqrt",
r"""
rsqrt() -> Tensor
See :func:`torch.rsqrt`
""",
)
add_docstr_all(
"rsqrt_",
r"""
rsqrt_() -> Tensor
In-place version of :meth:`~Tensor.rsqrt`
""",
)
add_docstr_all(
"scatter_",
r"""
scatter_(dim, index, src, reduce=None) -> Tensor
Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
index is specified by its index in :attr:`src` for ``dimension != dim`` and by
the corresponding value in :attr:`index` for ``dimension = dim``.
For a 3-D tensor, :attr:`self` is updated as::
self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
This is the reverse operation of the manner described in :meth:`~Tensor.gather`.
:attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
the same number of dimensions. It is also required that
``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
Note that ``index`` and ``src`` do not broadcast.
Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
between ``0`` and ``self.size(dim) - 1`` inclusive.
.. warning::
When indices are not unique, the behavior is non-deterministic (one of the
values from ``src`` will be picked arbitrarily) and the gradient will be
incorrect (it will be propagated to all locations in the source that
correspond to the same index)!
.. note::
The backward pass is implemented only for ``src.shape == index.shape``.
Additionally accepts an optional :attr:`reduce` argument that allows
specification of an optional reduction operation, which is applied to all
values in the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index`. For each value in :attr:`src`, the reduction
operation is applied to an index in :attr:`self` which is specified by
its index in :attr:`src` for ``dimension != dim`` and by the corresponding
value in :attr:`index` for ``dimension = dim``.
Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
is updated as::
self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2
Reducing with the addition operation is the same as using
:meth:`~torch.Tensor.scatter_add_`.
Args:
dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter, can be either empty
or of the same dimensionality as ``src``. When empty, the operation
returns ``self`` unchanged.
src (Tensor or float): the source element(s) to scatter.
reduce (str, optional): reduction operation to apply, can be either
``'add'`` or ``'multiply'``.
Example::
>>> src = torch.arange(1, 11).reshape((2, 5))
>>> src
tensor([[ 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10]])
>>> index = torch.tensor([[0, 1, 2, 0]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
tensor([[1, 0, 0, 4, 0],
[0, 2, 0, 0, 0],
[0, 0, 3, 0, 0]])
>>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
tensor([[1, 2, 3, 0, 0],
[6, 7, 0, 0, 8],
[0, 0, 0, 0, 0]])
>>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
... 1.23, reduce='multiply')
tensor([[2.0000, 2.0000, 2.4600, 2.0000],
[2.0000, 2.0000, 2.0000, 2.4600]])
>>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
... 1.23, reduce='add')
tensor([[2.0000, 2.0000, 3.2300, 2.0000],
[2.0000, 2.0000, 2.0000, 3.2300]])
""",
)
add_docstr_all(
"scatter_add_",
r"""
scatter_add_(dim, index, src) -> Tensor
Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index` tensor in a similar fashion as
:meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
an index in :attr:`self` which is specified by its index in :attr:`src`
for ``dimension != dim`` and by the corresponding value in :attr:`index` for
``dimension = dim``.
For a 3-D tensor, :attr:`self` is updated as::
self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
:attr:`self`, :attr:`index` and :attr:`src` should have same number of
dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
``d != dim``. Note that ``index`` and ``src`` do not broadcast.
Note:
{forward_reproducibility_note}
.. note::
The backward pass is implemented only for ``src.shape == index.shape``.
Args:
dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter and add, can be
either empty or of the same dimensionality as ``src``. When empty, the
operation returns ``self`` unchanged.
src (Tensor): the source elements to scatter and add
Example::
>>> src = torch.ones((2, 5))
>>> index = torch.tensor([[0, 1, 2, 0, 0]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
tensor([[1., 0., 0., 1., 1.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.]])
>>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
tensor([[2., 0., 0., 1., 1.],
[0., 2., 0., 0., 0.],
[0., 0., 2., 1., 1.]])
""".format(
**reproducibility_notes
),
)
add_docstr_all(
"scatter_reduce_",
r"""
scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor
Reduces all values from the :attr:`src` tensor to the indices specified in
the :attr:`index` tensor in the :attr:`self` tensor using the applied reduction
defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`,
:obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an
index in :attr:`self` which is specified by its index in :attr:`src` for
``dimension != dim`` and by the corresponding value in :attr:`index` for
``dimension = dim``. If :obj:`include_self=True`, the values in the :attr:`self`
tensor are included in the reduction.
:attr:`self`, :attr:`index` and :attr:`src` should all have
the same number of dimensions. It is also required that
``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
Note that ``index`` and ``src`` do not broadcast.
For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the
output is given as::
self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2
Note:
{forward_reproducibility_note}
.. note::
The backward pass is implemented only for ``src.shape == index.shape``.
.. warning::
This function is in beta and may change in the near future.
Args:
dim (int): the axis along which to index
index (LongTensor): the indices of elements to scatter and reduce.
src (Tensor): the source elements to scatter and reduce
reduce (str): the reduction operation to apply for non-unique indices
(:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
include_self (bool): whether elements from the :attr:`self` tensor are
included in the reduction
Example::
>>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
>>> index = torch.tensor([0, 1, 0, 1, 2, 1])
>>> input = torch.tensor([1., 2., 3., 4.])
>>> input.scatter_reduce(0, index, src, reduce="sum")
tensor([5., 14., 8., 4.])
>>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False)
tensor([4., 12., 5., 4.])
>>> input2 = torch.tensor([5., 4., 3., 2.])
>>> input2.scatter_reduce(0, index, src, reduce="amax")
tensor([5., 6., 5., 2.])
>>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False)
tensor([3., 6., 5., 2.])
""".format(
**reproducibility_notes
),
)
add_docstr_all(
"select",
r"""
select(dim, index) -> Tensor
See :func:`torch.select`
""",
)
add_docstr_all(
"select_scatter",
r"""
select_scatter(src, dim, index) -> Tensor
See :func:`torch.select_scatter`
""",
)
add_docstr_all(
"slice_scatter",
r"""
slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor
See :func:`torch.slice_scatter`
""",
)
add_docstr_all(
"set_",
r"""
set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor
Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
:attr:`self` tensor will share the same storage and have the same size and
strides as :attr:`source`. Changes to elements in one tensor will be reflected
in the other.
If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
storage, offset, size, and stride.
Args:
source (Tensor or Storage): the tensor or storage to use
storage_offset (int, optional): the offset in the storage
size (torch.Size, optional): the desired size. Defaults to the size of the source.
stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
""",
)
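# Illustrative sketch (hypothetical helper, not part of the PyTorch sources): shows how
# ``Tensor.set_`` makes one tensor reuse another tensor's storage, size, and strides.
def _example_set_shares_storage():
    import torch

    a = torch.zeros(2, 3)
    b = torch.empty(0)
    b.set_(a)                      # b now uses a's storage, size, and strides
    b[0, 0] = 1.0                  # writes through b are visible through a
    assert a[0, 0].item() == 1.0
    assert a.data_ptr() == b.data_ptr()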
add_docstr_all(
"sigmoid",
r"""
sigmoid() -> Tensor
See :func:`torch.sigmoid`
""",
)
add_docstr_all(
"sigmoid_",
r"""
sigmoid_() -> Tensor
In-place version of :meth:`~Tensor.sigmoid`
""",
)
add_docstr_all(
"logit",
r"""
logit() -> Tensor
See :func:`torch.logit`
""",
)
add_docstr_all(
"logit_",
r"""
logit_() -> Tensor
In-place version of :meth:`~Tensor.logit`
""",
)
add_docstr_all(
"sign",
r"""
sign() -> Tensor
See :func:`torch.sign`
""",
)
add_docstr_all(
"sign_",
r"""
sign_() -> Tensor
In-place version of :meth:`~Tensor.sign`
""",
)
add_docstr_all(
"signbit",
r"""
signbit() -> Tensor
See :func:`torch.signbit`
""",
)
add_docstr_all(
"sgn",
r"""
sgn() -> Tensor
See :func:`torch.sgn`
""",
)
add_docstr_all(
"sgn_",
r"""
sgn_() -> Tensor
In-place version of :meth:`~Tensor.sgn`
""",
)
add_docstr_all(
"sin",
r"""
sin() -> Tensor
See :func:`torch.sin`
""",
)
add_docstr_all(
"sin_",
r"""
sin_() -> Tensor
In-place version of :meth:`~Tensor.sin`
""",
)
add_docstr_all(
"sinc",
r"""
sinc() -> Tensor
See :func:`torch.sinc`
""",
)
add_docstr_all(
"sinc_",
r"""
sinc_() -> Tensor
In-place version of :meth:`~Tensor.sinc`
""",
)
add_docstr_all(
"sinh",
r"""
sinh() -> Tensor
See :func:`torch.sinh`
""",
)
add_docstr_all(
"sinh_",
r"""
sinh_() -> Tensor
In-place version of :meth:`~Tensor.sinh`
""",
)
add_docstr_all(
"size",
r"""
size(dim=None) -> torch.Size or int
Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
If ``dim`` is specified, returns an int holding the size of that dimension.
Args:
dim (int, optional): The dimension for which to retrieve the size.
Example::
>>> t = torch.empty(3, 4, 5)
>>> t.size()
torch.Size([3, 4, 5])
>>> t.size(dim=1)
4
""",
)
add_docstr_all(
"sort",
r"""
sort(dim=-1, descending=False) -> (Tensor, LongTensor)
See :func:`torch.sort`
""",
)
add_docstr_all(
"msort",
r"""
msort() -> Tensor
See :func:`torch.msort`
""",
)
add_docstr_all(
"argsort",
r"""
argsort(dim=-1, descending=False) -> LongTensor
See :func:`torch.argsort`
""",
)
add_docstr_all(
"sparse_dim",
r"""
sparse_dim() -> int
Return the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.
.. warning::
Throws an error if :attr:`self` is not a sparse tensor.
See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
""",
)
add_docstr_all(
"sparse_resize_",
r"""
sparse_resize_(size, sparse_dim, dense_dim) -> Tensor
Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
size and the number of sparse and dense dimensions.
.. note::
If the number of specified elements in :attr:`self` is zero, then
:attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any
size and positive integers such that ``len(size) == sparse_dim +
dense_dim``.
If :attr:`self` specifies one or more elements, however, then each
dimension in :attr:`size` must not be smaller than the corresponding
dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
equal the number of dense dimensions in :attr:`self`.
.. warning::
Throws an error if :attr:`self` is not a sparse tensor.
Args:
size (torch.Size): the desired size. If :attr:`self` is non-empty
sparse tensor, the desired size cannot be smaller than the
original size.
sparse_dim (int): the number of sparse dimensions
dense_dim (int): the number of dense dimensions
""",
)
add_docstr_all(
"sparse_resize_and_clear_",
r"""
sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor
Removes all specified elements from a :ref:`sparse tensor
<sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
size and the number of sparse and dense dimensions.
.. warning::
Throws an error if :attr:`self` is not a sparse tensor.
Args:
size (torch.Size): the desired size.
sparse_dim (int): the number of sparse dimensions
dense_dim (int): the number of dense dimensions
""",
)
add_docstr_all(
"sqrt",
r"""
sqrt() -> Tensor
See :func:`torch.sqrt`
""",
)
add_docstr_all(
"sqrt_",
r"""
sqrt_() -> Tensor
In-place version of :meth:`~Tensor.sqrt`
""",
)
add_docstr_all(
"square",
r"""
square() -> Tensor
See :func:`torch.square`
""",
)
add_docstr_all(
"square_",
r"""
square_() -> Tensor
In-place version of :meth:`~Tensor.square`
""",
)
add_docstr_all(
"squeeze",
r"""
squeeze(dim=None) -> Tensor
See :func:`torch.squeeze`
""",
)
add_docstr_all(
"squeeze_",
r"""
squeeze_(dim=None) -> Tensor
In-place version of :meth:`~Tensor.squeeze`
""",
)
add_docstr_all(
"std",
r"""
std(dim, unbiased=True, keepdim=False) -> Tensor
See :func:`torch.std`
.. function:: std(unbiased=True) -> Tensor
:noindex:
See :func:`torch.std`
""",
)
add_docstr_all(
"storage_offset",
r"""
storage_offset() -> int
Returns :attr:`self` tensor's offset in the underlying storage in terms of
number of storage elements (not bytes).
Example::
>>> x = torch.tensor([1, 2, 3, 4, 5])
>>> x.storage_offset()
0
>>> x[3:].storage_offset()
3
""",
)
add_docstr_all(
"stride",
r"""
stride(dim) -> tuple or int
Returns the stride of :attr:`self` tensor.
Stride is the jump necessary to go from one element to the next one in the
specified dimension :attr:`dim`. A tuple of all strides is returned when no
argument is passed in. Otherwise, an integer value is returned as the stride in
the particular dimension :attr:`dim`.
Args:
dim (int, optional): the desired dimension in which stride is required
Example::
>>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
>>> x.stride()
(5, 1)
>>> x.stride(0)
5
>>> x.stride(-1)
1
""",
)
add_docstr_all(
"sub",
r"""
sub(other, *, alpha=1) -> Tensor
See :func:`torch.sub`.
""",
)
add_docstr_all(
"sub_",
r"""
sub_(other, *, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.sub`
""",
)
add_docstr_all(
"subtract",
r"""
subtract(other, *, alpha=1) -> Tensor
See :func:`torch.subtract`.
""",
)
add_docstr_all(
"subtract_",
r"""
subtract_(other, *, alpha=1) -> Tensor
In-place version of :meth:`~Tensor.subtract`.
""",
)
add_docstr_all(
"sum",
r"""
sum(dim=None, keepdim=False, dtype=None) -> Tensor
See :func:`torch.sum`
""",
)
add_docstr_all(
"nansum",
r"""
nansum(dim=None, keepdim=False, dtype=None) -> Tensor
See :func:`torch.nansum`
""",
)
add_docstr_all(
"svd",
r"""
svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)
See :func:`torch.svd`
""",
)
add_docstr_all(
"symeig",
r"""
symeig(eigenvectors=False, upper=True) -> (Tensor, Tensor)
See :func:`torch.symeig`
""",
)
add_docstr_all(
"swapdims",
r"""
swapdims(dim0, dim1) -> Tensor
See :func:`torch.swapdims`
""",
)
add_docstr_all(
"swapdims_",
r"""
swapdims_(dim0, dim1) -> Tensor
In-place version of :meth:`~Tensor.swapdims`
""",
)
add_docstr_all(
"swapaxes",
r"""
swapaxes(axis0, axis1) -> Tensor
See :func:`torch.swapaxes`
""",
)
add_docstr_all(
"swapaxes_",
r"""
swapaxes_(axis0, axis1) -> Tensor
In-place version of :meth:`~Tensor.swapaxes`
""",
)
add_docstr_all(
"t",
r"""
t() -> Tensor
See :func:`torch.t`
""",
)
add_docstr_all(
"t_",
r"""
t_() -> Tensor
In-place version of :meth:`~Tensor.t`
""",
)
add_docstr_all(
"tile",
r"""
tile(*reps) -> Tensor
See :func:`torch.tile`
""",
)
add_docstr_all(
"to",
r"""
to(*args, **kwargs) -> Tensor
Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
inferred from the arguments of ``self.to(*args, **kwargs)``.
.. note::
If the ``self`` Tensor already
has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
Otherwise, the returned tensor is a copy of ``self`` with the desired
:class:`torch.dtype` and :class:`torch.device`.
Here are the ways to call ``to``:
.. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
:noindex:
Returns a Tensor with the specified :attr:`dtype`
Args:
{memory_format}
.. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
:noindex:
Returns a Tensor with the specified :attr:`device` and (optional)
:attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
When :attr:`non_blocking`, tries to convert asynchronously with respect to
the host if possible, e.g., converting a CPU Tensor with pinned memory to a
CUDA Tensor.
When :attr:`copy` is set, a new Tensor is created even when the Tensor
already matches the desired conversion.
Args:
{memory_format}
.. method:: to(other, non_blocking=False, copy=False) -> Tensor
:noindex:
Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
asynchronously with respect to the host if possible, e.g., converting a CPU
Tensor with pinned memory to a CUDA Tensor.
When :attr:`copy` is set, a new Tensor is created even when the Tensor
already matches the desired conversion.
Example::
>>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu
>>> tensor.to(torch.float64)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64)
>>> cuda0 = torch.device('cuda:0')
>>> tensor.to(cuda0)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], device='cuda:0')
>>> tensor.to(cuda0, dtype=torch.float64)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
>>> other = torch.randn((), dtype=torch.float64, device=cuda0)
>>> tensor.to(other, non_blocking=True)
tensor([[-0.5044, 0.0005],
[ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
""".format(
**common_args
),
)
add_docstr_all(
"byte",
r"""
byte(memory_format=torch.preserve_format) -> Tensor
``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"bool",
r"""
bool(memory_format=torch.preserve_format) -> Tensor
``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"char",
r"""
char(memory_format=torch.preserve_format) -> Tensor
``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"bfloat16",
r"""
bfloat16(memory_format=torch.preserve_format) -> Tensor
``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"double",
r"""
double(memory_format=torch.preserve_format) -> Tensor
``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"float",
r"""
float(memory_format=torch.preserve_format) -> Tensor
``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"cdouble",
r"""
cdouble(memory_format=torch.preserve_format) -> Tensor
``self.cdouble()`` is equivalent to ``self.to(torch.complex128)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"cfloat",
r"""
cfloat(memory_format=torch.preserve_format) -> Tensor
``self.cfloat()`` is equivalent to ``self.to(torch.complex64)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"chalf",
r"""
chalf(memory_format=torch.preserve_format) -> Tensor
``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"half",
r"""
half(memory_format=torch.preserve_format) -> Tensor
``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"int",
r"""
int(memory_format=torch.preserve_format) -> Tensor
``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"int_repr",
r"""
int_repr() -> Tensor
Given a quantized Tensor,
``self.int_repr()`` returns a CPU Tensor with uint8_t as data type that stores the
underlying uint8_t values of the given Tensor.
""",
)
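# Illustrative sketch (hypothetical helper, not part of the PyTorch sources): reading the
# raw uint8 values stored by a per-tensor quantized tensor via ``int_repr``.
def _example_int_repr():
    import torch

    x = torch.tensor([-1.0, 0.0, 1.0, 2.0])
    q = torch.quantize_per_tensor(x, scale=0.1, zero_point=10, dtype=torch.quint8)
    # int_repr() exposes the stored integers: round(x / scale) + zero_point
    assert q.int_repr().dtype == torch.uint8
    assert q.int_repr().tolist() == [0, 10, 20, 30]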
add_docstr_all(
"long",
r"""
long(memory_format=torch.preserve_format) -> Tensor
``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"short",
r"""
short(memory_format=torch.preserve_format) -> Tensor
``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.
Args:
{memory_format}
""".format(
**common_args
),
)
add_docstr_all(
"take",
r"""
take(indices) -> Tensor
See :func:`torch.take`
""",
)
add_docstr_all(
"take_along_dim",
r"""
take_along_dim(indices, dim) -> Tensor
See :func:`torch.take_along_dim`
""",
)
add_docstr_all(
"tan",
r"""
tan() -> Tensor
See :func:`torch.tan`
""",
)
add_docstr_all(
"tan_",
r"""
tan_() -> Tensor
In-place version of :meth:`~Tensor.tan`
""",
)
add_docstr_all(
"tanh",
r"""
tanh() -> Tensor
See :func:`torch.tanh`
""",
)
add_docstr_all(
"tanh_",
r"""
tanh_() -> Tensor
In-place version of :meth:`~Tensor.tanh`
""",
)
add_docstr_all(
"tolist",
r"""
tolist() -> list or number
Returns the tensor as a (nested) list. For scalars, a standard
Python number is returned, just like with :meth:`~Tensor.item`.
Tensors are automatically moved to the CPU first if necessary.
This operation is not differentiable.
Examples::
>>> a = torch.randn(2, 2)
>>> a.tolist()
[[0.012766935862600803, 0.5415473580360413],
[-0.08909505605697632, 0.7729271650314331]]
>>> a[0,0].tolist()
0.012766935862600803
""",
)
add_docstr_all(
"topk",
r"""
topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)
See :func:`torch.topk`
""",
)
add_docstr_all(
"to_dense",
r"""
to_dense() -> Tensor
Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`.
Example::
>>> s = torch.sparse_coo_tensor(
... torch.tensor([[1, 1],
... [0, 2]]),
... torch.tensor([9, 10]),
... size=(3, 3))
>>> s.to_dense()
tensor([[ 0, 0, 0],
[ 9, 0, 10],
[ 0, 0, 0]])
""",
)
add_docstr_all(
"to_sparse",
r"""
to_sparse(sparseDims) -> Tensor
Returns a sparse copy of the tensor. PyTorch supports sparse tensors in
:ref:`coordinate format <sparse-coo-docs>`.
Args:
sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor
Example::
>>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
>>> d
tensor([[ 0, 0, 0],
[ 9, 0, 10],
[ 0, 0, 0]])
>>> d.to_sparse()
tensor(indices=tensor([[1, 1],
[0, 2]]),
values=tensor([ 9, 10]),
size=(3, 3), nnz=2, layout=torch.sparse_coo)
>>> d.to_sparse(1)
tensor(indices=tensor([[1]]),
values=tensor([[ 9, 0, 10]]),
size=(3, 3), nnz=1, layout=torch.sparse_coo)
""",
)
add_docstr_all(
"to_sparse_csr",
r"""
to_sparse_csr() -> Tensor
Convert a tensor to compressed row storage format (CSR). Only works with 2D tensors.
Example::
>>> dense = torch.randn(5, 5)
>>> sparse = dense.to_sparse_csr()
>>> sparse._nnz()
25
""",
)
add_docstr_all(
"to_sparse_csc",
r"""
to_sparse_csc() -> Tensor
Convert a tensor to compressed column storage (CSC) format. Only works with 2D tensors.
Example::
>>> dense = torch.randn(5, 5)
>>> sparse = dense.to_sparse_csc()
>>> sparse._nnz()
25
""",
)
add_docstr_all(
"to_sparse_bsr",
r"""
to_sparse_bsr(blocksize) -> Tensor
Convert a CSR tensor to a block sparse row (BSR) storage format of given blocksize.
Example::
>>> dense = torch.randn(10, 10)
>>> sparse = dense.to_sparse_csr()
>>> sparse_bsr = sparse.to_sparse_bsr((5, 5))
>>> sparse_bsr.col_indices()
tensor([0, 1, 0, 1])
""",
)
add_docstr_all(
"to_sparse_bsc",
r"""
to_sparse_bsc(blocksize) -> Tensor
Convert a CSR tensor to a block sparse column (BSC) storage format of given blocksize.
Example::
>>> dense = torch.randn(10, 10)
>>> sparse = dense.to_sparse_csr()
>>> sparse_bsc = sparse.to_sparse_bsc((5, 5))
>>> sparse_bsc.row_indices()
tensor([0, 1, 0, 1])
""",
)
add_docstr_all(
"to_mkldnn",
r"""
to_mkldnn() -> Tensor
Returns a copy of the tensor in ``torch.mkldnn`` layout.
""",
)
add_docstr_all(
"trace",
r"""
trace() -> Tensor
See :func:`torch.trace`
""",
)
add_docstr_all(
"transpose",
r"""
transpose(dim0, dim1) -> Tensor
See :func:`torch.transpose`
""",
)
add_docstr_all(
"transpose_",
r"""
transpose_(dim0, dim1) -> Tensor
In-place version of :meth:`~Tensor.transpose`
""",
)
add_docstr_all(
"triangular_solve",
r"""
triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
See :func:`torch.triangular_solve`
""",
)
add_docstr_all(
"tril",
r"""
tril(diagonal=0) -> Tensor
See :func:`torch.tril`
""",
)
add_docstr_all(
"tril_",
r"""
tril_(diagonal=0) -> Tensor
In-place version of :meth:`~Tensor.tril`
""",
)
add_docstr_all(
"triu",
r"""
triu(diagonal=0) -> Tensor
See :func:`torch.triu`
""",
)
add_docstr_all(
"triu_",
r"""
triu_(diagonal=0) -> Tensor
In-place version of :meth:`~Tensor.triu`
""",
)
add_docstr_all(
"true_divide",
r"""
true_divide(value) -> Tensor
See :func:`torch.true_divide`
""",
)
add_docstr_all(
"true_divide_",
r"""
true_divide_(value) -> Tensor
In-place version of :meth:`~Tensor.true_divide`
""",
)
add_docstr_all(
"trunc",
r"""
trunc() -> Tensor
See :func:`torch.trunc`
""",
)
add_docstr_all(
"fix",
r"""
fix() -> Tensor
See :func:`torch.fix`.
""",
)
add_docstr_all(
"trunc_",
r"""
trunc_() -> Tensor
In-place version of :meth:`~Tensor.trunc`
""",
)
add_docstr_all(
"fix_",
r"""
fix_() -> Tensor
In-place version of :meth:`~Tensor.fix`
""",
)
add_docstr_all(
"type",
r"""
type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor
Returns the type if `dtype` is not provided, else casts this object to
the specified type.
If this is already of the correct type, no copy is performed and the
original object is returned.
Args:
dtype (dtype or string): The desired type
non_blocking (bool): If ``True``, and the source is in pinned memory
and destination is on the GPU or vice versa, the copy is performed
asynchronously with respect to the host. Otherwise, the argument
has no effect.
**kwargs: For compatibility, may contain the key ``async`` in place of
the ``non_blocking`` argument. The ``async`` arg is deprecated.
""",
)
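# Illustrative sketch (hypothetical helper, not part of the PyTorch sources): ``type()``
# with no argument reports the type string; with a dtype or type-name string it casts.
def _example_type():
    import torch

    x = torch.zeros(3)
    assert x.type() == "torch.FloatTensor"     # query the type string
    y = x.type(torch.int32)                     # cast via dtype
    z = x.type("torch.DoubleTensor")            # cast via type-name string
    assert y.dtype == torch.int32 and z.dtype == torch.float64
    assert x.type(torch.float32) is x           # already the right type: no copy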
add_docstr_all(
"type_as",
r"""
type_as(tensor) -> Tensor
Returns this tensor cast to the type of the given tensor.
This is a no-op if the tensor is already of the correct type. This is
equivalent to ``self.type(tensor.type())``
Args:
tensor (Tensor): the tensor which has the desired type
""",
)
add_docstr_all(
"unfold",
r"""
unfold(dimension, size, step) -> Tensor
Returns a view of the original tensor which contains all slices of size :attr:`size` from
:attr:`self` tensor in the dimension :attr:`dimension`.
Step between two slices is given by :attr:`step`.
If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
dimension :attr:`dimension` in the returned tensor will be
`(sizedim - size) / step + 1`.
An additional dimension of size :attr:`size` is appended in the returned tensor.
Args:
dimension (int): dimension in which unfolding happens
size (int): the size of each slice that is unfolded
step (int): the step between each slice
Example::
>>> x = torch.arange(1., 8)
>>> x
tensor([ 1., 2., 3., 4., 5., 6., 7.])
>>> x.unfold(0, 2, 1)
tensor([[ 1., 2.],
[ 2., 3.],
[ 3., 4.],
[ 4., 5.],
[ 5., 6.],
[ 6., 7.]])
>>> x.unfold(0, 2, 2)
tensor([[ 1., 2.],
[ 3., 4.],
[ 5., 6.]])
""",
)
add_docstr_all(
"uniform_",
r"""
uniform_(from=0, to=1) -> Tensor
Fills :attr:`self` tensor with numbers sampled from the continuous uniform
distribution:
.. math::
P(x) = \dfrac{1}{\text{to} - \text{from}}
""",
)
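# Illustrative sketch (hypothetical helper, not part of the PyTorch sources): filling a
# tensor in place with samples drawn uniformly from [from, to).
def _example_uniform_():
    import torch

    x = torch.empty(4, 4)
    x.uniform_(-1.0, 1.0)          # samples lie in [-1, 1)
    assert float(x.min()) >= -1.0 and float(x.max()) < 1.0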
add_docstr_all(
"unsqueeze",
r"""
unsqueeze(dim) -> Tensor
See :func:`torch.unsqueeze`
""",
)
add_docstr_all(
"unsqueeze_",
r"""
unsqueeze_(dim) -> Tensor
In-place version of :meth:`~Tensor.unsqueeze`
""",
)
add_docstr_all(
"var",
r"""
var(dim, unbiased=True, keepdim=False) -> Tensor
See :func:`torch.var`
.. function:: var(unbiased=True) -> Tensor
:noindex:
See :func:`torch.var`
""",
)
add_docstr_all(
"vdot",
r"""
vdot(other) -> Tensor
See :func:`torch.vdot`
""",
)
add_docstr_all(
"view",
r"""
view(*shape) -> Tensor
Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`shape`.
The returned tensor shares the same data and must have the same number
of elements, but may have a different size. For a tensor to be viewed, the new
view size must be compatible with its original size and stride, i.e., each new
view dimension must either be a subspace of an original dimension, or only span
across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,
.. math::
\text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]
Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
:meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
returns a view if the shapes are compatible, and copies (equivalent to calling
:meth:`contiguous`) otherwise.
Args:
shape (torch.Size or int...): the desired size
Example::
>>> x = torch.randn(4, 4)
>>> x.size()
torch.Size([4, 4])
>>> y = x.view(16)
>>> y.size()
torch.Size([16])
>>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions
>>> z.size()
torch.Size([2, 8])
>>> a = torch.randn(1, 2, 3, 4)
>>> a.size()
torch.Size([1, 2, 3, 4])
>>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension
>>> b.size()
torch.Size([1, 3, 2, 4])
>>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory
>>> c.size()
torch.Size([1, 3, 2, 4])
>>> torch.equal(b, c)
False
.. method:: view(dtype) -> Tensor
:noindex:
Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`dtype`.
If the element size of :attr:`dtype` is different than that of ``self.dtype``,
then the size of the last dimension of the output will be scaled
proportionally. For instance, if :attr:`dtype` element size is twice that of
``self.dtype``, then each pair of elements in the last dimension of
:attr:`self` will be combined, and the size of the last dimension of the output
will be half that of :attr:`self`. If :attr:`dtype` element size is half that
of ``self.dtype``, then each element in the last dimension of :attr:`self` will
be split in two, and the size of the last dimension of the output will be
double that of :attr:`self`. For this to be possible, the following conditions
must be true:
* ``self.dim()`` must be greater than 0.
* ``self.stride(-1)`` must be 1.
Additionally, if the element size of :attr:`dtype` is greater than that of
``self.dtype``, the following conditions must be true as well:
* ``self.size(-1)`` must be divisible by the ratio between the element
sizes of the dtypes.
* ``self.storage_offset()`` must be divisible by the ratio between the
element sizes of the dtypes.
* The strides of all dimensions, except the last dimension, must be
divisible by the ratio between the element sizes of the dtypes.
If any of the above conditions are not met, an error is thrown.
.. warning::
This overload is not supported by TorchScript, and using it in a Torchscript
program will cause undefined behavior.
Args:
dtype (:class:`torch.dtype`): the desired dtype
Example::
>>> x = torch.randn(4, 4)
>>> x
tensor([[ 0.9482, -0.0310, 1.4999, -0.5316],
[-0.1520, 0.7472, 0.5617, -0.8649],
[-2.4724, -0.0334, -0.2976, -0.8499],
[-0.2109, 1.9913, -0.9607, -0.6123]])
>>> x.dtype
torch.float32
>>> y = x.view(torch.int32)
>>> y
tensor([[ 1064483442, -1124191867, 1069546515, -1089989247],
[-1105482831, 1061112040, 1057999968, -1084397505],
[-1071760287, -1123489973, -1097310419, -1084649136],
[-1101533110, 1073668768, -1082790149, -1088634448]],
dtype=torch.int32)
>>> y[0, 0] = 1000000000
>>> x
tensor([[ 0.0047, -0.0310, 1.4999, -0.5316],
[-0.1520, 0.7472, 0.5617, -0.8649],
[-2.4724, -0.0334, -0.2976, -0.8499],
[-0.2109, 1.9913, -0.9607, -0.6123]])
>>> x.view(torch.cfloat)
tensor([[ 0.0047-0.0310j, 1.4999-0.5316j],
[-0.1520+0.7472j, 0.5617-0.8649j],
[-2.4724-0.0334j, -0.2976-0.8499j],
[-0.2109+1.9913j, -0.9607-0.6123j]])
>>> x.view(torch.cfloat).size()
torch.Size([4, 2])
>>> x.view(torch.uint8)
tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22,
8, 191],
[227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106,
93, 191],
[205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147,
89, 191],
[ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191,
28, 191]], dtype=torch.uint8)
>>> x.view(torch.uint8).size()
torch.Size([4, 16])
""",
)
add_docstr_all(
"view_as",
r"""
view_as(other) -> Tensor
View this tensor as the same size as :attr:`other`.
``self.view_as(other)`` is equivalent to ``self.view(other.size())``.
Please see :meth:`~Tensor.view` for more information about ``view``.
Args:
other (:class:`torch.Tensor`): The result tensor has the same size
as :attr:`other`.
""",
)
add_docstr_all(
"expand",
r"""
expand(*sizes) -> Tensor
Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
to a larger size.
Passing -1 as the size for a dimension means not changing the size of
that dimension.
Tensor can be also expanded to a larger number of dimensions, and the
new ones will be appended at the front. For the new dimensions, the
size cannot be set to -1.
Expanding a tensor does not allocate new memory, but only creates a
new view on the existing tensor where a dimension of size one is
expanded to a larger size by setting the ``stride`` to 0. Any dimension
of size 1 can be expanded to an arbitrary value without allocating new
memory.
Args:
*sizes (torch.Size or int...): the desired expanded size
.. warning::
More than one element of an expanded tensor may refer to a single
memory location. As a result, in-place operations (especially ones that
are vectorized) may result in incorrect behavior. If you need to write
to the tensors, please clone them first.
Example::
>>> x = torch.tensor([[1], [2], [3]])
>>> x.size()
torch.Size([3, 1])
>>> x.expand(3, 4)
tensor([[ 1, 1, 1, 1],
[ 2, 2, 2, 2],
[ 3, 3, 3, 3]])
>>> x.expand(-1, 4) # -1 means not changing the size of that dimension
tensor([[ 1, 1, 1, 1],
[ 2, 2, 2, 2],
[ 3, 3, 3, 3]])
""",
)
add_docstr_all(
"expand_as",
r"""
expand_as(other) -> Tensor
Expand this tensor to the same size as :attr:`other`.
``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.
Please see :meth:`~Tensor.expand` for more information about ``expand``.
Args:
other (:class:`torch.Tensor`): The result tensor has the same size
as :attr:`other`.
""",
)
add_docstr_all(
"sum_to_size",
r"""
sum_to_size(*size) -> Tensor
Sum ``this`` tensor to :attr:`size`.
:attr:`size` must be broadcastable to ``this`` tensor size.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
""",
)
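# Illustrative sketch (hypothetical helper, not part of the PyTorch sources):
# ``sum_to_size`` reduces a tensor down to a broadcast-compatible shape, roughly the
# reverse of ``expand``.
def _example_sum_to_size():
    import torch

    x = torch.ones(2, 3, 4)
    assert x.sum_to_size(1, 3, 1).shape == (1, 3, 1)   # summed over dims 0 and 2
    assert float(x.sum_to_size(1, 1, 1)) == 24.0        # fully reduced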
add_docstr_all(
"zero_",
r"""
zero_() -> Tensor
Fills :attr:`self` tensor with zeros.
""",
)
add_docstr_all(
"matmul",
r"""
matmul(tensor2) -> Tensor
See :func:`torch.matmul`
""",
)
add_docstr_all(
"chunk",
r"""
chunk(chunks, dim=0) -> List of Tensors
See :func:`torch.chunk`
""",
)
add_docstr_all(
"unsafe_chunk",
r"""
unsafe_chunk(chunks, dim=0) -> List of Tensors
See :func:`torch.unsafe_chunk`
""",
)
add_docstr_all(
"unsafe_split",
r"""
unsafe_split(split_size, dim=0) -> List of Tensors
See :func:`torch.unsafe_split`
""",
)
add_docstr_all(
"tensor_split",
r"""
tensor_split(indices_or_sections, dim=0) -> List of Tensors
See :func:`torch.tensor_split`
""",
)
add_docstr_all(
"hsplit",
r"""
hsplit(split_size_or_sections) -> List of Tensors
See :func:`torch.hsplit`
""",
)
add_docstr_all(
"vsplit",
r"""
vsplit(split_size_or_sections) -> List of Tensors
See :func:`torch.vsplit`
""",
)
add_docstr_all(
"dsplit",
r"""
dsplit(split_size_or_sections) -> List of Tensors
See :func:`torch.dsplit`
""",
)
add_docstr_all(
"stft",
r"""
stft(n_fft, hop_length=None, win_length=None, window=None, center=True,
     pad_mode='reflect', normalized=False, onesided=None, return_complex=None) -> Tensor
See :func:`torch.stft`
""",
)
add_docstr_all(
"istft",
r"""
istft(n_fft, hop_length=None, win_length=None, window=None,
center=True, normalized=False, onesided=True, length=None) -> Tensor
See :func:`torch.istft`
""",
)
add_docstr_all(
"det",
r"""
det() -> Tensor
See :func:`torch.det`
""",
)
add_docstr_all(
"where",
r"""
where(condition, y) -> Tensor
``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
See :func:`torch.where`
""",
)
add_docstr_all(
"logdet",
r"""
logdet() -> Tensor
See :func:`torch.logdet`
""",
)
add_docstr_all(
"slogdet",
r"""
slogdet() -> (Tensor, Tensor)
See :func:`torch.slogdet`
""",
)
add_docstr_all(
"unbind",
r"""
unbind(dim=0) -> seq
See :func:`torch.unbind`
""",
)
add_docstr_all(
"pin_memory",
r"""
pin_memory() -> Tensor
Copies the tensor to pinned memory, if it's not already pinned.
""",
)
add_docstr_all(
"pinverse",
r"""
pinverse() -> Tensor
See :func:`torch.pinverse`
""",
)
add_docstr_all(
"index_add",
r"""
index_add(dim, index, source, *, alpha=1) -> Tensor
Out-of-place version of :meth:`torch.Tensor.index_add_`.
""",
)
add_docstr_all(
"index_copy",
r"""
index_copy(dim, index, tensor2) -> Tensor
Out-of-place version of :meth:`torch.Tensor.index_copy_`.
""",
)
add_docstr_all(
"index_fill",
r"""
index_fill(dim, index, value) -> Tensor
Out-of-place version of :meth:`torch.Tensor.index_fill_`.
""",
)
add_docstr_all(
"scatter",
r"""
scatter(dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_`
""",
)
add_docstr_all(
"scatter_add",
r"""
scatter_add(dim, index, src) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_add_`
""",
)
add_docstr_all(
"scatter_reduce",
r"""
scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor
Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
""",
)
add_docstr_all(
"masked_scatter",
r"""
masked_scatter(mask, tensor) -> Tensor
Out-of-place version of :meth:`torch.Tensor.masked_scatter_`
""",
)
add_docstr_all(
"xlogy",
r"""
xlogy(other) -> Tensor
See :func:`torch.xlogy`
""",
)
add_docstr_all(
"xlogy_",
r"""
xlogy_(other) -> Tensor
In-place version of :meth:`~Tensor.xlogy`
""",
)
add_docstr_all(
"masked_fill",
r"""
masked_fill(mask, value) -> Tensor
Out-of-place version of :meth:`torch.Tensor.masked_fill_`
""",
)
add_docstr_all(
"grad",
r"""
This attribute is ``None`` by default and becomes a Tensor the first time a call to
:func:`backward` computes gradients for ``self``.
The attribute will then contain the gradients computed and future calls to
:func:`backward` will accumulate (add) gradients into it.
""",
)
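# Illustrative sketch (hypothetical helper, not part of the PyTorch sources): ``.grad``
# is None before the first backward() and accumulates across subsequent calls.
def _example_grad_accumulation():
    import torch

    w = torch.tensor([2.0], requires_grad=True)
    assert w.grad is None
    (w * 3).sum().backward()
    assert w.grad.item() == 3.0
    (w * 3).sum().backward()       # gradients accumulate: 3 + 3
    assert w.grad.item() == 6.0
    w.grad = None                  # common way to reset between optimizer steps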
add_docstr_all(
"retain_grad",
r"""
retain_grad() -> None
Enables this Tensor to have its :attr:`grad` populated during
:func:`backward`. This is a no-op for leaf tensors.
""",
)
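# Illustrative sketch (hypothetical helper, not part of the PyTorch sources): calling
# ``retain_grad()`` on a non-leaf tensor makes its ``.grad`` available after backward().
def _example_retain_grad():
    import torch

    x = torch.tensor([1.0, 2.0], requires_grad=True)
    y = x * 2                      # non-leaf: its grad would normally not be kept
    y.retain_grad()
    y.sum().backward()
    assert torch.equal(y.grad, torch.ones(2))
    assert torch.equal(x.grad, torch.full((2,), 2.0))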
add_docstr_all(
"retains_grad",
r"""
Is ``True`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
populated during :func:`backward`, ``False`` otherwise.
""",
)
add_docstr_all(
"requires_grad",
r"""
Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise.
.. note::
    The fact that gradients need to be computed for a Tensor does not mean that the :attr:`grad`
    attribute will be populated; see :attr:`is_leaf` for more details.
""",
)
add_docstr_all(
"is_leaf",
r"""
All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.
For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
created by the user. This means that they are not the result of an operation and so
:attr:`grad_fn` is None.
Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.
Example::
>>> a = torch.rand(10, requires_grad=True)
>>> a.is_leaf
True
>>> b = torch.rand(10, requires_grad=True).cuda()
>>> b.is_leaf
False
# b was created by the operation that cast a cpu Tensor into a cuda Tensor
>>> c = torch.rand(10, requires_grad=True) + 2
>>> c.is_leaf
False
# c was created by the addition operation
>>> d = torch.rand(10).cuda()
>>> d.is_leaf
True
# d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
>>> e = torch.rand(10).cuda().requires_grad_()
>>> e.is_leaf
True
# e requires gradients and has no operations creating it
>>> f = torch.rand(10, requires_grad=True, device="cuda")
>>> f.is_leaf
True
# f requires grad, has no operation creating it
""",
)
add_docstr_all(
"names",
r"""
Stores names for each of this tensor's dimensions.
``names[idx]`` corresponds to the name of tensor dimension ``idx``.
Names are either a string if the dimension is named or ``None`` if the
dimension is unnamed.
Dimension names may contain letters, digits, and underscores. Furthermore, a dimension
name must be a valid Python identifier that does not start with an underscore.
Tensors may not have two named dimensions with the same name.
.. warning::
The named tensor API is experimental and subject to change.
""",
)
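# Illustrative sketch (hypothetical helper, not part of the PyTorch sources): reading and
# dropping dimension names. The named tensor API is experimental, so details may change.
def _example_names():
    import torch

    t = torch.zeros(2, 3, names=("N", "C"))
    assert t.names == ("N", "C")
    u = t.rename(None)             # drop all dimension names
    assert u.names == (None, None)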
add_docstr_all(
"is_cuda",
r"""
Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.
""",
)
add_docstr_all(
"is_cpu",
r"""
Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise.
""",
)
add_docstr_all(
"is_ipu",
r"""
Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise.
""",
)
add_docstr_all(
"is_xpu",
r"""
Is ``True`` if the Tensor is stored on the XPU, ``False`` otherwise.
""",
)
add_docstr_all(
"is_quantized",
r"""
Is ``True`` if the Tensor is quantized, ``False`` otherwise.
""",
)
add_docstr_all(
"is_meta",
r"""
Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise. Meta tensors
are like normal tensors, but they carry no data.
""",
)
add_docstr_all(
"is_mps",
r"""
Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise.
""",
)
add_docstr_all(
"is_sparse",
r"""
Is ``True`` if the Tensor uses sparse storage layout, ``False`` otherwise.
""",
)
add_docstr_all(
"is_sparse_csr",
r"""
Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise.
""",
)
add_docstr_all(
"device",
r"""
Is the :class:`torch.device` where this Tensor is.
""",
)
add_docstr_all(
"ndim",
r"""
Alias for :meth:`~Tensor.dim()`
""",
)
add_docstr_all(
"T",
r"""
Returns a view of this tensor with its dimensions reversed.
If ``n`` is the number of dimensions in ``x``,
``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.
.. warning::
The use of :func:`Tensor.T` on tensors of dimension other than 2 to reverse their shape
is deprecated and it will throw an error in a future release. Consider :attr:`~.Tensor.mT`
to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse
the dimensions of a tensor.
""",
)
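# Illustrative sketch (hypothetical helper, not part of the PyTorch sources): ``.T`` on a
# 2-D tensor, and the permute-based equivalent recommended above for other ranks.
def _example_T():
    import torch

    m = torch.arange(6).reshape(2, 3)
    assert m.T.shape == (3, 2)
    x = torch.zeros(2, 3, 4)
    reversed_dims = x.permute(*torch.arange(x.ndim - 1, -1, -1))
    assert reversed_dims.shape == (4, 3, 2)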
add_docstr_all(
"H",
r"""
Returns a view of a matrix (2-D tensor) conjugated and transposed.
``x.H`` is equivalent to ``x.transpose(0, 1).conj()`` for complex matrices and
``x.transpose(0, 1)`` for real matrices.
.. seealso::
:attr:`~.Tensor.mH`: An attribute that also works on batches of matrices.
""",
)
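# Illustrative sketch (hypothetical helper, not part of the PyTorch sources): ``.H`` is
# the conjugate transpose for complex matrices and the plain transpose for real ones.
def _example_H():
    import torch

    m = torch.tensor([[1 + 1j, 2 - 2j]])                 # shape (1, 2), complex
    assert torch.equal(m.H, torch.tensor([[1 - 1j], [2 + 2j]]))
    r = torch.arange(6.0).reshape(2, 3)
    assert torch.equal(r.H, r.t())                        # real case: just a transpose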
add_docstr_all(
"mT",
r"""
Returns a view of this tensor with the last two dimensions transposed.
``x.mT`` is equivalent to ``x.transpose(-2, -1)``.
""",
)
add_docstr_all(
"mH",
r"""
Accessing this property is equivalent to calling :func:`adjoint`.
""",
)
add_docstr_all(
"adjoint",
r"""
adjoint() -> Tensor
Alias for :func:`torch.adjoint`
""",
)
add_docstr_all(
"real",
r"""
Returns a new tensor containing real values of the :attr:`self` tensor for a complex-valued input tensor.
The returned tensor and :attr:`self` share the same underlying storage.
Returns :attr:`self` if :attr:`self` is a real-valued tensor.
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.real
tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
""",
)
add_docstr_all(
"imag",
r"""
Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
.. warning::
:func:`imag` is only supported for tensors with complex dtypes.
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.imag
tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
""",
)
add_docstr_all(
"as_subclass",
r"""
as_subclass(cls) -> Tensor
Makes a ``cls`` instance with the same data pointer as ``self``. Changes
in the output mirror changes in ``self``, and the output stays attached
to the autograd graph. ``cls`` must be a subclass of ``Tensor``.
""",
)
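# Illustrative sketch (hypothetical helper and class, not part of the PyTorch sources):
# viewing a tensor as an instance of a ``Tensor`` subclass without copying data.
def _example_as_subclass():
    import torch

    class MyTensor(torch.Tensor):
        pass

    base = torch.ones(3)
    wrapped = base.as_subclass(MyTensor)
    assert isinstance(wrapped, MyTensor)
    assert wrapped.data_ptr() == base.data_ptr()   # same underlying data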
add_docstr_all(
"crow_indices",
r"""
crow_indices() -> IntTensor
Returns the tensor containing the compressed row indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
and of type ``int32`` or ``int64``. When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.
Example::
>>> csr = torch.eye(5,5).to_sparse_csr()
>>> csr.crow_indices()
tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)
""",
)
add_docstr_all(
"col_indices",
r"""
col_indices() -> IntTensor
Returns the tensor containing the column indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
and of type ``int32`` or ``int64``. When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.
Example::
>>> csr = torch.eye(5,5).to_sparse_csr()
>>> csr.col_indices()
tensor([0, 1, 2, 3, 4], dtype=torch.int32)
""",
)
add_docstr_all(
"to_padded_tensor",
r"""
to_padded_tensor(padding, output_size=None) -> Tensor
Returns a new (non-nested) Tensor by padding the nested tensor.
The leading entries will be filled with the nested data,
while the trailing entries will be padded.
.. warning::
:func:`to_padded_tensor` always copies the underlying data,
since the nested and the non-nested tensors differ in memory layout.
Args:
padding (float): The padding value for the trailing entries.
output_size (Tuple[int]): The size of the output tensor.
If given, it must be large enough to contain all nested data;
else, will infer by taking the max size of each nested sub-tensor along each dimension.
Example::
>>> nt = torch.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))])
nested_tensor([
tensor([[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276],
[-1.9967, -1.0054, 1.8972, 0.9174, -1.4995]]),
tensor([[-1.8546, -0.7194, -0.2918, -0.1846],
[ 0.2773, 0.8793, -0.5183, -0.6447],
[ 1.8009, 1.8468, -0.9832, -1.5272]])
])
>>> pt_infer = nt.to_padded_tensor(0.0)
tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276],
[-1.9967, -1.0054, 1.8972, 0.9174, -1.4995],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]],
[[-1.8546, -0.7194, -0.2918, -0.1846, 0.0000],
[ 0.2773, 0.8793, -0.5183, -0.6447, 0.0000],
[ 1.8009, 1.8468, -0.9832, -1.5272, 0.0000]]])
>>> pt_large = nt.to_padded_tensor(1.0, (2, 4, 6))
tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276, 1.0000],
[-1.9967, -1.0054, 1.8972, 0.9174, -1.4995, 1.0000],
[ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000],
[ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]],
[[-1.8546, -0.7194, -0.2918, -0.1846, 1.0000, 1.0000],
[ 0.2773, 0.8793, -0.5183, -0.6447, 1.0000, 1.0000],
[ 1.8009, 1.8468, -0.9832, -1.5272, 1.0000, 1.0000],
[ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]])
>>> pt_small = nt.to_padded_tensor(2.0, (2, 2, 2))
RuntimeError: Value in output_size is less than NestedTensor padded size. Truncation is not supported.
""",
)
|
pytorch-master
|
torch/_tensor_docs.py
|
# Copyright (c) 2010-2017 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
inf = math.inf
nan = math.nan
string_classes = (str, bytes)
def with_metaclass(meta: type, *bases) -> type:
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta): # type: ignore[misc, valid-type]
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, "temporary_class", (), {})
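# Illustrative sketch (hypothetical helper and classes, not part of the original module):
# using with_metaclass so a class is created through a custom metaclass.
def _example_with_metaclass():
    class RegistryMeta(type):
        # records the name of every class created through this metaclass
        registry = []

        def __new__(mcls, name, bases, namespace):
            cls = super().__new__(mcls, name, bases, namespace)
            mcls.registry.append(name)
            return cls

    class Registered(with_metaclass(RegistryMeta)):
        pass

    assert type(Registered) is RegistryMeta
    assert "Registered" in RegistryMeta.registry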
|
pytorch-master
|
torch/_six.py
|
r"""
The torch package contains data structures for multi-dimensional
tensors and defines mathematical operations over these tensors.
Additionally, it provides many utilities for efficient serialization of
Tensors and arbitrary types, and other useful utilities.
It has a CUDA counterpart that enables you to run your tensor computations
on an NVIDIA GPU with compute capability >= 3.0.
"""
import os
import sys
import platform
import textwrap
import ctypes
import inspect
if sys.version_info < (3,):
raise Exception("Python 2 has reached end-of-life and is no longer supported by PyTorch.")
from ._utils import _import_dotted_name, classproperty
from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
# TODO(torch_deploy) figure out how to freeze version.py in fbcode build
if sys.executable == 'torch_deploy':
__version__ = "torch-deploy-1.8"
else:
from .torch_version import __version__ as __version__
from ._six import string_classes as _string_classes
from typing import Set, Type, TYPE_CHECKING, Union, Callable
import builtins
__all__ = [
'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
'TypedStorage', 'UntypedStorage',
'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
'lobpcg', 'use_deterministic_algorithms',
'are_deterministic_algorithms_enabled',
'is_deterministic_algorithms_warn_only_enabled',
'set_deterministic_debug_mode', 'get_deterministic_debug_mode',
'set_float32_matmul_precision', 'get_float32_matmul_precision',
'set_warn_always', 'is_warn_always_enabled',
]
################################################################################
# Load the extension module
################################################################################
if sys.platform == 'win32':
pfiles_path = os.getenv('ProgramFiles', 'C:\\Program Files')
py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
th_dll_path = os.path.join(os.path.dirname(__file__), 'lib')
# When users create a virtualenv that inherits the base environment,
# we will need to add the corresponding library directory into
# DLL search directories. Otherwise, it will rely on `PATH` which
# is dependent on user settings.
if sys.exec_prefix != sys.base_exec_prefix:
base_py_dll_path = os.path.join(sys.base_exec_prefix, 'Library', 'bin')
else:
base_py_dll_path = ''
dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, base_py_dll_path]))
if all([not os.path.exists(os.path.join(p, 'nvToolsExt64_1.dll')) for p in dll_paths]):
nvtoolsext_dll_path = os.path.join(
os.getenv('NVTOOLSEXT_PATH', os.path.join(pfiles_path, 'NVIDIA Corporation', 'NvToolsExt')), 'bin', 'x64')
else:
nvtoolsext_dll_path = ''
from .version import cuda as cuda_version
import glob
if cuda_version and all([not glob.glob(os.path.join(p, 'cudart64*.dll')) for p in dll_paths]):
cuda_version_1 = cuda_version.replace('.', '_')
cuda_path_var = 'CUDA_PATH_V' + cuda_version_1
default_path = os.path.join(pfiles_path, 'NVIDIA GPU Computing Toolkit', 'CUDA', 'v' + cuda_version)
cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin')
else:
cuda_path = ''
dll_paths.extend(filter(os.path.exists, [nvtoolsext_dll_path, cuda_path]))
kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
prev_error_mode = kernel32.SetErrorMode(0x0001)
kernel32.LoadLibraryW.restype = ctypes.c_void_p
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
kernel32.LoadLibraryExW.restype = ctypes.c_void_p
for dll_path in dll_paths:
if sys.version_info >= (3, 8):
os.add_dll_directory(dll_path)
elif with_load_library_flags:
res = kernel32.AddDllDirectory(dll_path)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error adding "{dll_path}" to the DLL directories.'
raise err
try:
ctypes.CDLL('vcruntime140.dll')
ctypes.CDLL('msvcp140.dll')
ctypes.CDLL('vcruntime140_1.dll')
except OSError:
print('''Microsoft Visual C++ Redistributable is not installed, this may lead to the DLL load failure.
It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exe''')
dlls = glob.glob(os.path.join(th_dll_path, '*.dll'))
path_patched = False
for dll in dlls:
is_loaded = False
if with_load_library_flags:
res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
last_error = ctypes.get_last_error()
if res is None and last_error != 126:
err = ctypes.WinError(last_error)
err.strerror += f' Error loading "{dll}" or one of its dependencies.'
raise err
elif res is not None:
is_loaded = True
if not is_loaded:
if not path_patched:
os.environ['PATH'] = ';'.join(dll_paths + [os.environ['PATH']])
path_patched = True
res = kernel32.LoadLibraryW(dll)
if res is None:
err = ctypes.WinError(ctypes.get_last_error())
err.strerror += f' Error loading "{dll}" or one of its dependencies.'
raise err
kernel32.SetErrorMode(prev_error_mode)
# See Note [Global dependencies]
def _load_global_deps():
if platform.system() == 'Windows' or sys.executable == 'torch_deploy':
return
lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so')
here = os.path.abspath(__file__)
lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)
ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
platform.system() != 'Windows':
# Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a
# few circumstances:
#
# 1. You're in a build environment (e.g., fbcode) where
# libtorch_global_deps is not available, but you still need
# to get mkl to link in with RTLD_GLOBAL or it will just
# not work.
#
# 2. You're trying to run PyTorch under UBSAN and you need
# to ensure that only one copy of libtorch is loaded, so
# vptr checks work properly
#
# If you're using this setting, you must verify that all the libraries
# you load consistently use the same libstdc++, or you may have
# mysterious segfaults.
#
import os as _dl_flags
if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_LAZY'):
try:
# next try if DLFCN exists
import DLFCN as _dl_flags # type: ignore[import, no-redef]
except ImportError:
# as a last attempt, use compile-time constants
import torch._dl as _dl_flags # type: ignore[import, no-redef]
old_flags = sys.getdlopenflags()
sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_LAZY)
from torch._C import * # noqa: F403
sys.setdlopenflags(old_flags)
del old_flags
del _dl_flags
else:
# Easy way. You want this most of the time, because it will prevent
# C++ symbols from libtorch clobbering C++ symbols from other
# libraries, leading to mysterious segfaults.
#
# If building in an environment where libtorch_global_deps isn't available
# like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
# want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
#
# See Note [Global dependencies]
if USE_GLOBAL_DEPS:
_load_global_deps()
from torch._C import * # noqa: F403
# Appease the type checker; ordinarily this binding is inserted by the
# torch._C module initialization code in C
if TYPE_CHECKING:
import torch._C as _C
# Check to see if we can load C extensions, and if not provide some guidance
# on what the problem might be.
try:
# _initExtension is chosen (arbitrarily) as a sentinel.
from torch._C import _initExtension
except ImportError:
import torch._C as _C_for_compiled_check
# The __file__ check only works for Python 3.7 and above.
if sys.version_info >= (3, 7) and _C_for_compiled_check.__file__ is None:
raise ImportError(textwrap.dedent('''
Failed to load PyTorch C extensions:
It appears that PyTorch has loaded the `torch/_C` folder
of the PyTorch repository rather than the C extensions which
are expected in the `torch._C` namespace. This can occur when
using the `install` workflow. e.g.
$ python setup.py install && python -c "import torch"
This error can generally be solved using the `develop` workflow
$ python setup.py develop && python -c "import torch" # This should succeed
or by running Python from a different directory.
''').strip()) from None
raise # If __file__ is not None the cause is unknown, so just re-raise.
for name in dir(_C):
if name[0] != '_' and not name.endswith('Base'):
__all__.append(name)
obj = getattr(_C, name)
if (isinstance(obj, Callable) or inspect.isclass(obj)): # type: ignore[arg-type]
if (obj.__module__ != 'torch'):
# TODO: fix their module from C++ side
if name not in ['DisableTorchFunction', 'Generator']:
obj.__module__ = 'torch'
if not TYPE_CHECKING:
# issue 38137 and python issue 43367. Submodules of a C extension are
# non-standard, and attributes of those submodules cannot be pickled since
    # pickle expects to be able to import them as "from _C.sub import attr",
    # which fails with "_C is not a package"
for attr in dir(_C):
candidate = getattr(_C, attr)
if type(candidate) is type(_C):
# submodule
if f'torch._C.{attr}' not in sys.modules:
sys.modules[f'torch._C.{attr}'] = candidate
################################################################################
# Define basic utilities
################################################################################
def typename(o):
if isinstance(o, torch.Tensor):
return o.type()
module = ''
class_name = ''
if hasattr(o, '__module__') and o.__module__ != 'builtins' \
and o.__module__ != '__builtin__' and o.__module__ is not None:
module = o.__module__ + '.'
if hasattr(o, '__qualname__'):
class_name = o.__qualname__
elif hasattr(o, '__name__'):
class_name = o.__name__
else:
class_name = o.__class__.__name__
return module + class_name
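# Illustrative sketch (hypothetical helper, not part of the original module): what
# typename() reports for a tensor versus an ordinary Python object.
def _example_typename():
    import fractions
    import torch

    assert typename(torch.zeros(2)) == "torch.FloatTensor"
    assert typename(fractions.Fraction()) == "fractions.Fraction"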
def is_tensor(obj):
r"""Returns True if `obj` is a PyTorch tensor.
Note that this function is simply doing ``isinstance(obj, Tensor)``.
Using that ``isinstance`` check is better for typechecking with mypy,
and more explicit - so it's recommended to use that instead of
``is_tensor``.
Args:
obj (Object): Object to test
Example::
>>> x=torch.tensor([1,2,3])
>>> torch.is_tensor(x)
True
"""
return isinstance(obj, torch.Tensor)
def is_storage(obj):
r"""Returns True if `obj` is a PyTorch storage object.
Args:
obj (Object): Object to test
"""
return type(obj) in _storage_classes
def set_default_tensor_type(t):
r"""Sets the default ``torch.Tensor`` type to floating point tensor type
``t``. This type will also be used as default floating point type for
type inference in :func:`torch.tensor`.
The default floating point tensor type is initially ``torch.FloatTensor``.
Args:
t (type or string): the floating point tensor type or its name
Example::
>>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
>>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32
torch.float32
>>> torch.set_default_tensor_type(torch.DoubleTensor)
>>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
torch.float64
"""
if isinstance(t, _string_classes):
t = _import_dotted_name(t)
_C._set_default_tensor_type(t)
def set_default_dtype(d):
r"""
Sets the default floating point dtype to :attr:`d`. Supports torch.float32
and torch.float64 as inputs. Other dtypes may be accepted without complaint
but are not supported and are unlikely to work as expected.
When PyTorch is initialized its default floating point dtype is torch.float32,
and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like
type inference. The default floating point dtype is used to:
1. Implicitly determine the default complex dtype. When the default floating point
type is float32 the default complex dtype is complex64, and when the default
floating point type is float64 the default complex type is complex128.
2. Infer the dtype for tensors constructed using Python floats or complex Python
numbers. See examples below.
3. Determine the result of type promotion between bool and integer tensors and
Python floats and complex Python numbers.
Args:
d (:class:`torch.dtype`): the floating point dtype to make the default.
Either torch.float32 or torch.float64.
Example:
>>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
>>> # initial default for floating point is torch.float32
>>> # Python floats are interpreted as float32
>>> torch.tensor([1.2, 3]).dtype
torch.float32
>>> # initial default for complex is torch.complex64
>>> # Complex Python numbers are interpreted as complex64
>>> torch.tensor([1.2, 3j]).dtype
torch.complex64
>>> torch.set_default_dtype(torch.float64)
>>> # Python floats are now interpreted as float64
>>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
torch.float64
>>> # Complex Python numbers are now interpreted as complex128
>>> torch.tensor([1.2, 3j]).dtype # a new complex tensor
torch.complex128
"""
_C._set_default_dtype(d)
def use_deterministic_algorithms(mode, *, warn_only=False):
r""" Sets whether PyTorch operations must use "deterministic"
algorithms. That is, algorithms which, given the same input, and when
run on the same software and hardware, always produce the same output.
When enabled, operations will use deterministic algorithms when available,
and if only nondeterministic algorithms are available they will throw a
:class:`RuntimeError` when called.
.. note:: This setting alone is not always enough to make an application
reproducible. Refer to :ref:`reproducibility` for more information.
.. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative
interface for this feature.
The following normally-nondeterministic operations will act
deterministically when ``mode=True``:
* :class:`torch.nn.Conv1d` when called on CUDA tensor
* :class:`torch.nn.Conv2d` when called on CUDA tensor
* :class:`torch.nn.Conv3d` when called on CUDA tensor
* :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor
* :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor
* :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor
* :func:`torch.bmm` when called on sparse-dense CUDA tensors
* :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor
and the index is a list of tensors
* :func:`torch.Tensor.index_put` with ``accumulate=False``
* :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU
tensor
* :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU
tensor
* :func:`torch.Tensor.scatter_add_` when ``input`` dimension is one and called
on a CUDA tensor
* :func:`torch.gather` when ``input`` dimension is one and called
on a CUDA tensor that requires grad
* :func:`torch.index_add` when called on CUDA tensor
* :func:`torch.index_select` when attempting to differentiate a CUDA tensor
* :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor
* :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor
The following normally-nondeterministic operations will throw a
:class:`RuntimeError` when ``mode=True``:
* :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor
* :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor
and one of the following modes is used:
- ``linear``
- ``bilinear``
- ``bicubic``
- ``trilinear``
* :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.NLLLoss` when called on a CUDA tensor
* :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor
* :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when
``mode='max'``
* :func:`torch.Tensor.scatter_add_` when ``input`` dimension is larger than one
and called on a CUDA tensor
* :func:`torch.gather` when ``input`` dimension is larger than one
and called on a CUDA tensor that requires grad
* :func:`torch.Tensor.put_` when ``accumulate=False``
* :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor
* :func:`torch.histc` when called on a CUDA tensor
* :func:`torch.bincount` when called on a CUDA tensor
* :func:`torch.kthvalue` when called on a CUDA tensor
* :func:`torch.median` with indices output when called on a CUDA tensor
* :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor
* :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex
A handful of CUDA operations are nondeterministic if the CUDA version is
10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8``
or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more
details: `<https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility>`_
If one of these environment variable configurations is not set, a :class:`RuntimeError`
will be raised from these operations when called with CUDA tensors:
* :func:`torch.mm`
* :func:`torch.mv`
* :func:`torch.bmm`
Note that deterministic operations tend to have worse performance than
nondeterministic operations.
.. note::
This flag does not detect or prevent nondeterministic behavior caused
by calling an inplace operation on a tensor with an internal memory
overlap or by giving such a tensor as the :attr:`out` argument for an
operation. In these cases, multiple writes of different data may target
a single memory location, and the order of writes is not guaranteed.
Args:
mode (:class:`bool`): If True, makes potentially nondeterministic
operations switch to a deterministic algorithm or throw a runtime
error. If False, allows nondeterministic operations.
Keyword args:
warn_only (:class:`bool`, optional): If True, operations that do not
have a deterministic implementation will throw a warning instead of
an error. Default: ``False``
Example::
>>> torch.use_deterministic_algorithms(True)
# Forward mode nondeterministic error
>>> # xdoctest: +SKIP
>>> torch.randn(10, device='cuda').kthvalue(0)
...
RuntimeError: kthvalue CUDA does not have a deterministic implementation...
# Backward mode nondeterministic error
>>> torch.nn.AvgPool3d(1)(torch.randn(3, 4, 5, 6, requires_grad=True).cuda()).sum().backward()
...
RuntimeError: avg_pool3d_backward_cuda does not have a deterministic implementation...
"""
_C._set_deterministic_algorithms(mode, warn_only=warn_only)
def are_deterministic_algorithms_enabled():
r"""Returns True if the global deterministic flag is turned on. Refer to
:func:`torch.use_deterministic_algorithms` documentation for more details.
"""
return _C._get_deterministic_algorithms()
def is_deterministic_algorithms_warn_only_enabled():
r"""Returns True if the global deterministic flag is set to warn only.
Refer to :func:`torch.use_deterministic_algorithms` documentation for more
details.
"""
return _C._get_deterministic_algorithms_warn_only()
def set_deterministic_debug_mode(debug_mode: Union[builtins.int, str]) -> None:
r"""Sets the debug mode for deterministic operations.
.. note:: This is an alternative interface for
:func:`torch.use_deterministic_algorithms`. Refer to that function's
documentation for details about affected operations.
Args:
debug_mode(str or int): If "default" or 0, don't error or warn on
nondeterministic operations. If "warn" or 1, warn on
nondeterministic operations. If "error" or 2, error on
nondeterministic operations.
"""
# NOTE: builtins.int is used here because int in this scope resolves
# to torch.int
if not isinstance(debug_mode, (builtins.int, str)):
raise TypeError(f'debug_mode must be str or int, but got {type(debug_mode)}')
if isinstance(debug_mode, str):
if debug_mode == 'default':
debug_mode = 0
elif debug_mode == 'warn':
debug_mode = 1
elif debug_mode == 'error':
debug_mode = 2
else:
raise RuntimeError(
'invalid value of debug_mode, expected one of `default`, '
f'`warn`, `error`, but got {debug_mode}')
if debug_mode == 0:
_C._set_deterministic_algorithms(False)
elif debug_mode == 1:
_C._set_deterministic_algorithms(True, warn_only=True)
elif debug_mode == 2:
_C._set_deterministic_algorithms(True)
else:
raise RuntimeError(
'invalid value of debug_mode, expected 0, 1, or 2, '
f'but got {debug_mode}')
def get_deterministic_debug_mode() -> builtins.int:
r"""Returns the current value of the debug mode for deterministic
operations. Refer to :func:`torch.set_deterministic_debug_mode`
documentation for more details.
"""
if _C._get_deterministic_algorithms():
if _C._get_deterministic_algorithms_warn_only():
return 1
else:
return 2
else:
return 0
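# Editorial sketch (hedged): set_deterministic_debug_mode() is a thin str/int
# front-end over use_deterministic_algorithms(); the round trip below shows the
# mapping 0="default", 1="warn", 2="error". Hypothetical helper, never called
# at import time.
def _deterministic_debug_mode_sketch():
    set_deterministic_debug_mode("warn")
    assert get_deterministic_debug_mode() == 1
    assert are_deterministic_algorithms_enabled()
    assert is_deterministic_algorithms_warn_only_enabled()
    set_deterministic_debug_mode(0)    # restore the default (no error, no warning)
    assert not are_deterministic_algorithms_enabled()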
def get_float32_matmul_precision() -> builtins.str:
r"""Returns the current value of float32 matrix multiplication precision. Refer to
:func:`torch.set_float32_matmul_precision` documentation for more details.
"""
return _C._get_float32_matmul_precision()
def set_float32_matmul_precision(precision):
r"""Sets the internal precision of float32 matrix multiplications.
Running float32 matrix multiplications in lower precision may significantly increase
performance, and in some programs the loss of precision has a negligible impact.
Supports three settings:
* "highest", float32 matrix multiplications use the float32 datatype for
internal computations.
* "high", float32 matrix multiplications use the TensorFloat32 or bfloat16_3x
datatypes for internal computations, if fast matrix multiplication algorithms
using those datatypes internally are available. Otherwise float32
matrix multiplications are computed as if the precision is "highest".
* "medium", float32 matrix multiplications use the bfloat16 datatype for
internal computations, if a fast matrix multiplication algorithm
using that datatype internally is available. Otherwise float32
matrix multiplications are computed as if the precision is "high".
.. note::
This does not change the output dtype of float32 matrix multiplications,
it controls how the internal computation of the matrix multiplication is performed.
.. note::
This does not change the precision of convolution operations. Other flags,
like `torch.backends.cudnn.allow_tf32`, may control the precision of convolution
operations.
.. note::
This flag currently only affects one native device type: CUDA.
If "high" or "medium" are set then the TensorFloat32 datatype will be used
when computing float32 matrix multiplications, equivalent to setting
`torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default)
is set then the float32 datatype is used for internal computations, equivalent
to setting `torch.backends.cuda.matmul.allow_tf32 = False`.
Args:
precision(str): can be set to "highest" (default), "high", or "medium" (see above).
"""
_C._set_float32_matmul_precision(precision)
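# Editorial sketch (hedged): the matmul precision setting is a single global
# string read back by get_float32_matmul_precision(); only the three documented
# values are meaningful. Hypothetical helper, never called at import time.
def _float32_matmul_precision_sketch():
    set_float32_matmul_precision("high")       # allow TF32/bfloat16_3x internals on CUDA
    assert get_float32_matmul_precision() == "high"
    set_float32_matmul_precision("highest")    # restore the default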
def set_warn_always(b):
r"""When this flag is False (default) then some PyTorch warnings may only
appear once per process. This helps avoid excessive warning information.
Setting it to True causes these warnings to always appear, which may be
helpful when debugging.
Args:
b (:class:`bool`): If True, force warnings to always be emitted.
If False, restore the default behaviour of deduplicating warnings.
"""
_C._set_warnAlways(b)
def is_warn_always_enabled():
r"""Returns True if the global warn_always flag is turned on. Refer to
:func:`torch.set_warn_always` documentation for more details.
"""
return _C._get_warnAlways()
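# Editorial sketch (hedged): warn_always toggles a single global flag that
# is_warn_always_enabled() reads back. Hypothetical helper, never called at
# import time.
def _warn_always_sketch():
    set_warn_always(True)
    assert is_warn_always_enabled()
    set_warn_always(False)                     # back to deduplicated warnings
    assert not is_warn_always_enabled()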
################################################################################
# Define numeric constants
################################################################################
# For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and
# NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
from math import e, nan, inf, pi
__all__.extend(['e', 'pi', 'nan', 'inf'])
################################################################################
# Define Storage and Tensor classes
################################################################################
from ._tensor import Tensor
from .storage import _StorageBase, TypedStorage, _LegacyStorage, UntypedStorage
# NOTE: New <type>Storage classes should never be added. When adding a new
# dtype, use torch.storage.TypedStorage directly.
class ByteStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.uint8
class DoubleStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.double
class FloatStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.float
class HalfStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.half
class LongStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.long
class IntStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.int
class ShortStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.short
class CharStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.int8
class BoolStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.bool
class BFloat16Storage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.bfloat16
class ComplexDoubleStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.cdouble
class ComplexFloatStorage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.cfloat
class QUInt8Storage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.quint8
class QInt8Storage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.qint8
class QInt32Storage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.qint32
class QUInt4x2Storage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.quint4x2
class QUInt2x4Storage(_LegacyStorage):
@classproperty
def dtype(self):
return torch.quint2x4
_storage_classes = {
UntypedStorage, DoubleStorage, FloatStorage, LongStorage, IntStorage,
ShortStorage, CharStorage, ByteStorage, HalfStorage, BoolStorage,
QUInt8Storage, QInt8Storage, QInt32Storage, BFloat16Storage,
ComplexFloatStorage, ComplexDoubleStorage, QUInt4x2Storage, QUInt2x4Storage,
TypedStorage
}
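# Editorial sketch (hedged): each legacy storage class above pins its dtype via
# the classproperty, and all of them participate in the is_storage() check
# through _storage_classes. Hypothetical helper, never called at import time.
def _legacy_storage_dtype_sketch():
    assert FloatStorage.dtype is torch.float
    assert BoolStorage.dtype is torch.bool
    assert FloatStorage in _storage_classes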
# The _tensor_classes set is initialized by the call to _C._initialize_tensor_type_bindings()
_tensor_classes: Set[Type] = set()
# If you edit these imports, please update torch/__init__.py.in as well
from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
from .serialization import save, load
from ._tensor_str import set_printoptions
################################################################################
# Initialize extension
################################################################################
def manager_path():
if platform.system() == 'Windows' or sys.executable == 'torch_deploy':
return b""
path = get_file_path('torch', 'bin', 'torch_shm_manager')
prepare_multiprocessing_environment(get_file_path('torch'))
if not os.path.exists(path):
raise RuntimeError("Unable to find torch_shm_manager at " + path)
return path.encode('utf-8')
from torch.amp import autocast
# Shared memory manager needs to know the exact location of manager executable
_C._initExtension(manager_path())
del manager_path
# Appease the type checker: it can't deal with direct setting of globals().
# Note that we will see "too many" functions when reexporting this way; there
# is not a good way to fix this problem. Perhaps VariableFunctions could be
# redesigned so that this import alone is sufficient.
if TYPE_CHECKING:
# Some type signatures pulled in from _VariableFunctions here clash with
# signatures already imported. For now these clashes are ignored; see
# PR #43339 for details.
from torch._C._VariableFunctions import * # type: ignore[misc] # noqa: F403
# Ops not to be exposed in `torch` namespace,
# mostly helper ops.
PRIVATE_OPS = (
'unique_dim',
)
for name in dir(_C._VariableFunctions):
if name.startswith('__') or name in PRIVATE_OPS:
continue
obj = getattr(_C._VariableFunctions, name)
obj.__module__ = 'torch'
globals()[name] = obj
if not name.startswith("_"):
__all__.append(name)
################################################################################
# Import interface functions defined in Python
################################################################################
# needs to be after the above ATen bindings so we can overwrite from Python side
from .functional import * # noqa: F403
################################################################################
# Remove unnecessary members
################################################################################
del _StorageBase
del _LegacyStorage
################################################################################
# Define _assert
################################################################################
# needs to be before the submodule imports to avoid circular dependencies
def _assert(condition, message):
r"""A wrapper around Python's assert which is symbolically traceable.
"""
from .overrides import has_torch_function, handle_torch_function
if type(condition) is not torch.Tensor and has_torch_function((condition,)):
return handle_torch_function(_assert, (condition,), condition, message)
assert condition, message
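# Editorial sketch (hedged): for plain Python values _assert behaves like a
# bare assert; tensor-like conditions instead go through __torch_function__ so
# that tracing tools can intercept the check. Hypothetical helper, never called
# at import time.
def _assert_sketch():
    _assert(1 + 1 == 2, "arithmetic still works")
    try:
        _assert(False, "raised as a plain AssertionError for Python bools")
    except AssertionError:
        pass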
################################################################################
# Import most common subpackages
################################################################################
# Use the redundant form so that type checkers know that these are a part of
# the public API. The "regular" import lines are there solely for the runtime
# side effect of adding to the imported module's members for other users.
from torch import cuda as cuda
from torch import cpu as cpu
from torch import autograd as autograd
from torch.autograd import (
no_grad as no_grad,
enable_grad as enable_grad,
set_grad_enabled as set_grad_enabled,
inference_mode as inference_mode,
)
from torch import fft as fft
from torch import futures as futures
from torch import nn as nn
from torch import optim as optim
import torch.optim._multi_tensor
from torch import multiprocessing as multiprocessing
from torch import sparse as sparse
from torch import special as special
import torch.utils.backcompat
from torch import onnx as onnx
from torch import jit as jit
from torch import linalg as linalg
from torch import hub as hub
from torch import random as random
from torch import distributions as distributions
from torch import testing as testing
import torch.backends.cuda
import torch.backends.mps
import torch.backends.cudnn
import torch.backends.mkl
import torch.backends.mkldnn
import torch.backends.openmp
import torch.backends.quantized
import torch.utils.data
from torch import __config__ as __config__
from torch import __future__ as __future__
from torch import profiler as profiler
# Quantized, sparse, AO, etc. should be last to get imported, as nothing
# is expected to depend on them.
import torch.nn.intrinsic
import torch.nn.quantizable
import torch.nn.quantized
# AO depends on nn, as well as quantized stuff -- so should be after those.
from torch import ao as ao
_C._init_names(list(torch._storage_classes))
# attach docstrings to torch and tensor functions
from . import _torch_docs, _tensor_docs, _storage_docs
del _torch_docs, _tensor_docs, _storage_docs
def compiled_with_cxx11_abi():
r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
return _C._GLIBCXX_USE_CXX11_ABI
# Import the ops "namespace"
from torch._ops import ops
from torch._classes import classes
# quantization depends on torch.fx
# Import quantization
from torch import quantization as quantization
# Import the quasi random sampler
from torch import quasirandom as quasirandom
# If you are seeing this, it means that this call site was not checked to see
# whether the memory format could be preserved, and it fell back to the old
# default behaviour of contiguous.
legacy_contiguous_format = contiguous_format
# Register fork handler to initialize OpenMP in child processes (see gh-28389)
from torch.multiprocessing._atfork import register_after_fork
register_after_fork(torch.get_num_threads)
del register_after_fork
# Import tools that require fully imported torch (for applying
# torch.jit.script as a decorator, for instance):
from ._lobpcg import lobpcg as lobpcg
from ._vmap_internals import vmap as vmap
# These were previously defined in native_functions.yaml and appeared on the
# `torch` namespace, but we moved them to c10 dispatch to facilitate custom
# class usage. We add these lines here to preserve backward compatibility.
quantized_lstm = torch.ops.aten.quantized_lstm
quantized_gru = torch.ops.aten.quantized_gru
from torch.utils.dlpack import from_dlpack, to_dlpack
# Import experimental masked operations support. See
# [RFC-0016](https://github.com/pytorch/rfcs/pull/27) for more
# information.
from . import _masked
# Import removed ops with error message about removal
from ._linalg_utils import solve
def _register_device_module(device_type, module):
r"""Register an external runtime module of the specific :attr:`device_type`
supported by torch.
After the :attr:`module` is registered correctly, the user can refer to
the external runtime module as part of torch with the attribute torch.xxx.
"""
# Make sure the device_type represent a supported device type for torch.
device_type = torch.device(device_type).type
m = sys.modules[__name__]
if hasattr(m, device_type):
raise RuntimeError("The runtime module of '{}' has already "
"been registered with '{}'".format(device_type, getattr(m, device_type)))
setattr(m, device_type, module)
torch_module_name = '.'.join([__name__, device_type])
sys.modules[torch_module_name] = module
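# Editorial sketch (hedged): out-of-tree backends use _register_device_module()
# to appear as torch.<device_type> and under sys.modules. The module below is a
# hypothetical placeholder; a real backend would pass its own runtime module
# and a device type that torch.device() recognises.
def _register_device_module_sketch():
    import types
    backend = types.ModuleType("my_backend")   # stand-in for a real runtime module
    # A real backend would then call, for example:
    #   _register_device_module("xpu", backend)
    # after which it is reachable as torch.xpu and as sys.modules["torch.xpu"].
    return backend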
# expose return_types
from . import return_types
if sys.executable != 'torch_deploy':
from . import library
if not TYPE_CHECKING:
from . import _meta_registrations
|
pytorch-master
|
torch/__init__.py
|
"""
Python implementation of ``__torch_function__``
While most of the torch API and handling for ``__torch_function__`` happens
at the C++ level, some of the torch API is written in Python, so we need
Python-level handling for ``__torch_function__`` overrides as well. The main
developer-facing functionality in this file is ``handle_torch_function`` and
``has_torch_function``. See torch/functional.py and test/test_overrides.py
for usage examples.
Note
----
Heavily inspired by NumPy's ``__array_function__`` (see:
https://github.com/pytorch/pytorch/issues/24015 and
https://www.numpy.org/neps/nep-0018-array-function-protocol.html
)
If changing this file in a way that can affect ``__torch_function__`` overhead,
please report the benchmarks in ``benchmarks/overrides_benchmark``. See the
instructions in the ``README.md`` in that directory.
"""
import __future__
import collections
import functools
import types
import warnings
from typing import Dict, Set, List, Any, Callable, Iterable, Type, Iterator, Tuple
import contextlib
import torch
from torch._C import (
_has_torch_function, _has_torch_function_unary,
_has_torch_function_variadic, _add_docstr, _set_torch_function_mode, _get_torch_function_mode)
from torch.utils._mode_utils import _enable_mode, _ModeInfo, _wrap_init, _restore_mode
__all__ = [
"get_ignored_functions",
"get_overridable_functions",
"get_testing_overrides",
"handle_torch_function",
"has_torch_function",
"resolve_name",
"is_tensor_like",
"is_tensor_method_or_property",
"wrap_torch_function",
"enable_reentrant_dispatch",
"get_buffer",
]
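# Editorial sketch (hedged): a minimal tensor-like class showing how the
# dispatch described in the module docstring is hooked from Python. The class,
# its behaviour, and the restriction to torch.add are all hypothetical and only
# illustrate the ``__torch_function__`` protocol handled in this file.
class _ScalarLikeSketch:
    def __init__(self, value):
        self.value = value

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        if func is torch.add:
            # Unwrap our scalar-likes and defer to the real implementation.
            unwrapped = [a.value if isinstance(a, cls) else a for a in args]
            return torch.add(*unwrapped, **kwargs)
        return NotImplemented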
@functools.lru_cache(None)
def get_ignored_functions() -> Set[Callable]:
"""
Return public functions that cannot be overridden by ``__torch_function__``.
Returns
-------
Set[Callable]
A set of functions that are publicly available in the torch API but cannot
be overridden with ``__torch_function__``. Mostly this is because none of the
arguments of these functions are tensors or tensor-likes.
Examples
--------
>>> torch.Tensor.as_subclass in torch.overrides.get_ignored_functions()
True
>>> torch.add in torch.overrides.get_ignored_functions()
False
"""
Tensor = torch.Tensor
return {
torch.typename,
torch.is_tensor,
torch.is_storage,
torch.set_default_tensor_type,
torch.set_rng_state,
torch.get_rng_state,
torch.manual_seed,
torch.initial_seed,
torch.seed,
torch.save,
torch.load,
torch.set_printoptions,
torch.fork,
torch.get_default_dtype,
torch.get_num_interop_threads,
torch.get_num_threads,
torch.init_num_threads,
torch.import_ir_module,
torch.import_ir_module_from_buffer,
torch.is_anomaly_enabled,
torch.is_grad_enabled,
torch.merge_type_from_type_comment,
torch.parse_ir,
torch.parse_schema,
torch.parse_type_comment,
torch.set_anomaly_enabled,
torch.set_flush_denormal,
torch.set_num_interop_threads,
torch.set_num_threads,
torch.wait,
torch.as_tensor,
torch.from_numpy,
torch.get_device,
torch.tensor,
torch.default_generator,
torch.has_cuda,
torch.has_cudnn,
torch.has_lapack,
torch.device,
torch.dtype,
torch.finfo,
torch.has_mkl,
torch.has_mps,
torch.has_mkldnn,
torch.has_openmp,
torch.iinfo,
torch.memory_format,
torch.qscheme,
torch.set_grad_enabled,
torch.no_grad,
torch.enable_grad,
torch.inference_mode,
torch.is_inference_mode_enabled,
torch.layout,
torch.align_tensors,
torch.arange,
torch.as_strided,
torch.bartlett_window,
torch.blackman_window,
torch.broadcast_shapes,
torch.can_cast,
torch.cudnn_affine_grid_generator,
torch.cudnn_batch_norm,
torch.cudnn_convolution,
torch.cudnn_convolution_transpose,
torch.cudnn_convolution_relu,
torch.cudnn_convolution_add_relu,
torch.cudnn_grid_sampler,
torch.cudnn_is_acceptable,
torch.empty,
torch.empty_strided,
torch.empty_quantized,
torch.eye,
torch.fft.fftfreq,
torch.fft.rfftfreq,
torch.from_file,
torch.full,
torch.fill,
torch.hamming_window,
torch.hann_window,
torch.kaiser_window,
torch.linspace,
torch.logspace,
torch.mkldnn_adaptive_avg_pool2d,
torch.mkldnn_convolution,
torch.mkldnn_max_pool2d,
torch.mkldnn_max_pool3d,
torch.mkldnn_linear_backward_weights,
torch.nested_tensor,
torch.normal,
torch.ones,
torch.promote_types,
torch.rand,
torch.randn,
torch.randint,
torch.randperm,
torch.range,
torch.result_type,
torch.scalar_tensor,
torch.sparse_coo_tensor,
torch.sparse_compressed_tensor,
torch.sparse_csr_tensor,
torch.sparse_csc_tensor,
torch.sparse_bsr_tensor,
torch.sparse_bsc_tensor,
torch.tril_indices,
torch.triu_indices,
torch.vander,
torch.zeros,
torch._jit_internal.boolean_dispatch,
torch.nn.functional.assert_int_or_pair,
torch.nn.functional.upsample,
torch.nn.functional.upsample_bilinear,
torch.nn.functional.upsample_nearest,
torch.nn.functional.has_torch_function,
torch.nn.functional.has_torch_function_unary,
torch.nn.functional.has_torch_function_variadic,
torch.nn.functional.handle_torch_function,
torch.nn.functional.sigmoid,
torch.nn.functional.hardsigmoid,
torch.nn.functional.tanh,
# Doesn't actually take or return tensor arguments
torch.nn.init.calculate_gain,
# These are deprecated; don't test them
torch.nn.init.uniform,
torch.nn.init.normal,
torch.nn.init.constant,
torch.nn.init.eye,
torch.nn.init.dirac,
torch.nn.init.xavier_uniform,
torch.nn.init.xavier_normal,
torch.nn.init.kaiming_uniform,
torch.nn.init.kaiming_normal,
torch.nn.init.orthogonal,
torch.nn.init.sparse,
has_torch_function,
handle_torch_function,
torch.set_autocast_enabled,
torch.is_autocast_enabled,
torch.clear_autocast_cache,
torch.set_autocast_cpu_enabled,
torch.is_autocast_cpu_enabled,
torch.set_autocast_cpu_dtype,
torch.get_autocast_cpu_dtype,
torch.get_autocast_gpu_dtype,
torch.set_autocast_gpu_dtype,
torch.autocast_increment_nesting,
torch.autocast_decrement_nesting,
torch.is_autocast_cache_enabled,
torch.set_autocast_cache_enabled,
torch.nn.functional.hardswish,
torch.is_vulkan_available,
torch.are_deterministic_algorithms_enabled,
torch.use_deterministic_algorithms,
torch.is_deterministic_algorithms_warn_only_enabled,
torch.set_deterministic_debug_mode,
torch.get_deterministic_debug_mode,
torch.set_float32_matmul_precision,
torch.get_float32_matmul_precision,
torch.unify_type_list,
torch.is_warn_always_enabled,
torch.set_warn_always,
torch.vitals_enabled,
torch.set_vital,
torch.read_vitals,
torch.frombuffer,
torch.asarray,
Tensor.__delitem__,
Tensor.__dir__,
Tensor.__getattribute__,
Tensor.__init__,
Tensor.__iter__,
Tensor.__init_subclass__,
Tensor.__delattr__,
Tensor.__setattr__,
Tensor.__torch_function__,
Tensor.__torch_dispatch__,
Tensor.__new__,
Tensor.__class__,
Tensor.__subclasshook__,
Tensor.__hash__,
Tensor.as_subclass,
Tensor.reinforce,
Tensor.new,
Tensor.new_tensor,
Tensor.new_empty,
Tensor.new_empty_strided,
Tensor.new_zeros,
Tensor.new_ones,
Tensor.new_full,
Tensor._make_subclass,
Tensor.solve,
Tensor.stride,
Tensor.unflatten,
Tensor.to_sparse_coo,
Tensor.to_sparse_csr,
Tensor.to_sparse_csc,
Tensor.to_sparse_bsr,
Tensor.to_sparse_bsc,
Tensor._reduce_ex_internal,
Tensor._fix_weakref,
Tensor._make_wrapper_subclass,
Tensor._python_dispatch.__get__,
Tensor._conj,
Tensor._conj_physical,
Tensor._neg_view,
Tensor._is_zerotensor,
Tensor._addmm_activation,
Tensor._nested_tensor_layer_norm,
Tensor.to_padded_tensor
}
@functools.lru_cache(None)
def get_default_nowrap_functions() -> Set[Callable]:
"""
Return public functions that do not wrap in a subclass when invoked by
the default ``Tensor.__torch_function__`` that preserves subclasses. Typically,
these functions represent field accesses (i.e., retrieving a Tensor that
is stored somewhere on the Tensor) as opposed to computation. Users of
these functions expect object identity to be preserved over multiple accesses
(e.g., ``a.grad is a.grad``) which cannot be upheld if we're wrapping on
the fly every time (furthermore, the tensor stored here might already be
the subclass, in which case wrapping really ought not to happen).
Not ALL property accessors have this property; for example ``Tensor.T`` actually
just creates a new transposed tensor on the fly, and so we SHOULD interpose on
these calls (you need to check the implementation of the function to see if
this is the case or not). Additionally, if a property accessor doesn't return a Tensor,
it doesn't have to be on this list (though it is harmless if it is).
"""
Tensor = torch.Tensor
return {
Tensor._base.__get__,
Tensor.grad.__get__,
Tensor._grad.__get__,
}
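# Editorial sketch (hedged): the rationale above is object identity for field
# accesses such as .grad; the check below assumes an ordinary leaf tensor and
# is never executed at import time.
def _nowrap_identity_sketch():
    a = torch.ones(2, requires_grad=True)
    a.sum().backward()
    assert a.grad is a.grad    # repeated access returns the same object, no re-wrapping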
@functools.lru_cache(None)
def get_testing_overrides() -> Dict[Callable, Callable]:
"""Return a dict containing dummy overrides for all overridable functions
Returns
-------
Dict[Callable, Callable]
A dictionary that maps overridable functions in the PyTorch API to
lambda functions that have the same signature as the real function
and unconditionally return -1. These lambda functions are useful
for testing API coverage for a type that defines ``__torch_function__``.
Examples
--------
>>> import inspect
>>> my_add = torch.overrides.get_testing_overrides()[torch.add]
>>> inspect.signature(my_add)
<Signature (input, other, out=None)>
"""
# Every function in the PyTorch API that can be overridden needs an entry
# in this dict.
#
# Optimally we would use inspect to get the function signature and define
# the lambda function procedurally but that is blocked by generating
# function signatures for native kernels that can be consumed by inspect.
# See Issue #28233.
Tensor = torch.Tensor
ret: Dict[Callable, Callable] = {
torch.abs: lambda input, out=None: -1,
torch.absolute: lambda input, out=None: -1,
torch.adaptive_avg_pool1d: lambda input, output_size: -1,
torch.adaptive_max_pool1d: lambda inputs, output_size: -1,
torch.acos: lambda input, out=None: -1,
torch.adjoint: lambda input: -1,
torch.arccos: lambda input, out=None: -1,
torch.acosh: lambda input, out=None: -1,
torch.arccosh: lambda input, out=None: -1,
torch.add: lambda input, other, out=None: -1,
torch.addbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
torch.addcdiv: lambda input, tensor1, tensor2, value=1, out=None: -1,
torch.addcmul: lambda input, tensor1, tensor2, value=1, out=None: -1,
torch.addmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.addmv: lambda input, mat, vec, beta=1, alpha=1, out=None: -1,
torch.addr: lambda input, vec1, vec2, beta=1, alpha=1, out=None: -1,
torch.affine_grid_generator: lambda theta, size, align_corners: -1,
torch.all: lambda input, dim=None: -1,
torch.allclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
torch.alpha_dropout: lambda input, p, train, inplace=False: -1,
torch.amax: lambda input, dim=None: -1,
torch.amin: lambda input, dim=None: -1,
torch.aminmax: lambda input, dim=None, keepdim=False, out=None: -1,
torch.angle: lambda input, out=None: -1,
torch.any: lambda input, dim=None, keepdim=False, out=None: -1,
torch.argmax: lambda input: -1,
torch.argmin: lambda input: -1,
torch.argsort: lambda input, dim=None: -1,
torch.asin: lambda input, out=None: -1,
torch._assert_async: lambda input: -1,
torch.arcsin: lambda input, out=None: -1,
torch.asinh: lambda input, out=None: -1,
torch.arcsinh: lambda input, out=None: -1,
torch.atan: lambda input, out=None: -1,
torch.arctan: lambda input, out=None: -1,
torch.atan2: lambda input, other, out=None: -1,
torch.arctan2: lambda input, other, out=None: -1,
torch.atanh: lambda input, out=None: -1,
torch.arctanh: lambda input, out=None: -1,
torch.atleast_1d: lambda *tensors: -1,
torch.atleast_2d: lambda *tensors: -1,
torch.atleast_3d: lambda *tensors: -1,
torch.avg_pool1d: lambda input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True: -1,
torch.baddbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
torch.batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled: -1,
torch.batch_norm_backward_elemt: lambda grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count_tensor: -1,
torch.batch_norm_backward_reduce: lambda grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g: -1,
torch.batch_norm_elemt: lambda input, weight, bias, mean, invstd, eps: -1,
torch.batch_norm_gather_stats: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
torch.batch_norm_gather_stats_with_counts: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
torch.batch_norm_stats: lambda input, eps: -1,
torch.batch_norm_update_stats: lambda input, running_mean, running_var, momentum: -1,
torch.bernoulli: lambda input, generator=None, out=None: -1,
torch.bilinear: lambda input1, input2, weight, bias: -1,
torch.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None, reduce=None,
reduction='mean', pos_weight=None: -1),
torch.bincount: lambda input, weights=None, minlength=0: -1,
torch.binomial: lambda count, prob, generator=None: -1,
torch.bitwise_and: lambda input, other, out=None: -1,
torch.bitwise_not: lambda input, out=None: -1,
torch.bitwise_or: lambda input, other, out=None: -1,
torch.bitwise_xor: lambda input, other, out=None: -1,
torch.bitwise_left_shift: lambda input, other, out=None: -1,
torch.bitwise_right_shift: lambda input, other, out=None: -1,
torch.block_diag: lambda *tensors: -1,
torch.bmm: lambda input, mat2, out=None: -1,
torch.broadcast_tensors: lambda *tensors: -1,
torch.broadcast_to: lambda self, size: -1,
torch.bucketize: lambda input, boundaries, out_int32=False, right=False, out=None: -1,
torch.cartesian_prod: lambda *tensors: -1,
torch.cat: lambda tensors, dim=0, out=None: -1,
torch.concat: lambda tensors, dim=0, out=None: -1, # alias for torch.cat
torch.cdist: lambda x1, x2, p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary': -1,
torch.ceil: lambda input, out=None: -1,
torch.celu: lambda input, alpha=1., inplace=False: -1,
torch.chain_matmul: lambda *matrices, out=None: -1,
torch.channel_shuffle: lambda input, groups: -1,
torch.cholesky: lambda input, upper=False, out=None: -1,
torch.linalg.cholesky: lambda input, out=None: -1,
torch.linalg.cholesky_ex: lambda input, check_errors=False, out=None: -1,
torch.cholesky_inverse: lambda input, upper=False, out=None: -1,
torch.cholesky_solve: lambda input1, input2, upper=False, out=None: -1,
torch.choose_qparams_optimized: lambda input, numel, n_bins, ratio, bit_width: -1,
torch.chunk: lambda input, chunks, dim=0: -1,
torch.clamp: lambda input, min=None, max=None, out=None: -1,
torch.clip: lambda input, min=None, max=None, out=None: -1,
torch.clamp_min: lambda input, min, out=None: -1,
torch.clamp_max: lambda input, max, out=None: -1,
torch.column_stack: lambda tensors, out=None: -1,
torch.cov: lambda input, correction=1, fweights=None, aweights=None: -1,
torch.clone: lambda input: -1,
torch.combinations: lambda input, r=2, with_replacement=False: -1,
torch.complex: lambda real, imag: -1,
torch.copysign: lambda input, other, out=None: -1,
torch.polar: lambda abs, ang: -1,
torch.linalg.cond: lambda input, ord=None: -1,
torch.conj: lambda input, out=None: -1,
torch.conj_physical: lambda input, out=None: -1,
torch.resolve_conj: lambda input, out=None: -1,
torch.resolve_neg: lambda input, out=None: -1,
torch.constant_pad_nd: lambda input, pad, value=0: -1,
torch.conv1d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.conv2d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.conv3d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.convolution: lambda input, weight, bias, stride, padding, dilation, transposed, output_padding, groups: -1,
torch.conv_tbc: lambda input, weight, bias, pad=0: -1,
torch.conv_transpose1d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.conv_transpose2d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.conv_transpose3d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.corrcoef: lambda input: -1,
torch.cos: lambda input, out=None: -1,
torch.cosine_embedding_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1,
torch.cosh: lambda input, out=None: -1,
torch.cosine_similarity: lambda x1, x2, dim=1, eps=1e-8: -1,
torch.count_nonzero: lambda input: -1,
torch.cross: lambda input, other, dim=None, out=None: -1,
torch.linalg.cross: lambda input, other, dim=-1, out=None: -1,
torch.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean',
zero_infinity=False: -1),
torch.cummax: lambda input, dim, out=None: -1,
torch.cummin: lambda input, dim, out=None: -1,
torch.cumprod: lambda input, dim, out=None, dtype=None: -1,
torch.cumsum: lambda input, dim, out=None, dtype=None: -1,
torch.cumulative_trapezoid: lambda y, x=None, dim=-1: -1,
torch.logcumsumexp: lambda input, dim, out=None: -1,
torch.deg2rad: lambda input, out=None: -1,
torch.dequantize: lambda input: -1,
torch.det: lambda input: -1,
torch.linalg.det: lambda input: -1, # alias for torch.det # type: ignore[attr-defined]
torch.detach: lambda input: -1,
torch.diag: lambda input, diagonal=0, out=None: -1,
torch.diag_embed: lambda input, diagonal=0, out=None: -1,
torch.diagflat: lambda input, offset=0: -1,
torch.diff: lambda input, n=1, dim=-1, prepend=None, append=None, out=None: -1,
torch.diagonal: lambda input, offset=0, dim1=0, dim2=1: -1,
torch.linalg.diagonal: lambda input, offset=0, dim1=-2, dim2=-1: -1,
torch.diagonal_scatter: lambda input, src, offset=0, dim1=0, dim2=1: -1,
torch.as_strided_scatter: lambda self, src, size, stride, storage_offset=None: -1,
torch.digamma: lambda input, out=None: -1,
torch.dist: lambda input, other, p=2: -1,
torch.div: lambda input, other, rounding_mode=None, out=None: -1,
torch.divide: lambda input, other, rounding_mode=None, out=None: -1,
torch.dot: lambda input, other, out=None: -1,
torch.dropout: lambda input, p, train, inplace=False: -1,
torch.dsmm: lambda input, mat2: -1,
torch.hsmm: lambda mat1, mat2: -1,
torch.dsplit: lambda input, indices_or_sections: -1,
torch.dstack: lambda tensors, out=None: -1,
torch.eig: lambda input, eigenvectors=False, out=None: -1,
torch.linalg.eig: lambda input, out=None: -1,
torch.linalg.eigvals: lambda input, out=None: -1,
torch.linalg.eigh: lambda input, UPLO="L", out=None: -1,
torch.linalg.eigvalsh: lambda input, UPLO="L", out=None: -1,
torch.einsum: lambda equation, *operands: -1,
torch.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False,
sparse=False: -1),
torch.embedding_bag: (lambda input, weight, offsets, max_norm=None, norm_type=2, scale_grad_by_freq=False,
mode='mean', sparse=False, per_sample_weights=None, padding_idx=None: -1),
torch.empty_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.eq: lambda input, other, out=None: -1,
torch.equal: lambda input, other: -1,
torch.erf: lambda input, out=None: -1,
torch.erfc: lambda input, out=None: -1,
torch.erfinv: lambda input, out=None: -1,
torch.exp: lambda input, out=None: -1,
torch.exp2: lambda input, out=None: -1,
torch.expm1: lambda input, out=None: -1,
torch.fake_quantize_per_channel_affine: lambda input, scale, zero_point, axis, quant_min, quant_max: -1,
torch.fake_quantize_per_tensor_affine: lambda input, scale, zero_point, quant_min, quant_max: -1,
torch.fused_moving_avg_obs_fake_quant: (lambda x, observer_on, fake_quant_on, averaging_const, running_min,
running_max, scale, zero_point, quant_min, quant_max, ch_axis,
per_row_fake_quant=False, symmetric_quant=False: -1),
torch.fbgemm_linear_fp16_weight: lambda input, packed_weight, bias: -1,
torch.fbgemm_linear_fp16_weight_fp32_activation: lambda input, packed_weight, bias: -1,
torch.fbgemm_linear_int8_weight: lambda input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias: -1,
torch.fbgemm_linear_int8_weight_fp32_activation: (lambda input, weight, packed, col_offsets, weight_scale,
weight_zero_point, bias: -1),
torch.fbgemm_linear_quantize_weight: lambda input: -1,
torch.fbgemm_pack_gemm_matrix_fp16: lambda input: -1,
torch.fbgemm_pack_quantized_matrix: lambda input, a, b: -1,
torch.feature_alpha_dropout: lambda input, p, train: -1,
torch.feature_dropout: lambda input, p, train: -1,
torch.fft.fft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.ifft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.rfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.irfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.hfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.ihfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.hfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.ihfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.hfftn: lambda input, s=None, dim=-1, norm=None: -1,
torch.fft.ihfftn: lambda input, s=None, dim=-1, norm=None: -1,
torch.fft.fftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.ifftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.rfftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.irfftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.fft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.ifft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.rfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.irfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.fftshift: lambda input, dim=None: -1,
torch.fft.ifftshift: lambda input, dim=None: -1,
torch.fix: lambda input, out=None: -1,
torch.flatten: lambda input, start_dim=0, end_dim=-1: -1,
torch.flip: lambda input, dims: -1,
torch.fliplr: lambda input: -1,
torch.flipud: lambda input: -1,
torch.frobenius_norm: lambda input, dim=None, keepdim=False, out=None: -1,
torch.floor: lambda input, out=None: -1,
torch.floor_divide: lambda input, other: -1,
torch.float_power: lambda input, exponent, out=None: -1,
torch.fmod: lambda input, other, out=None: -1,
torch.frac: lambda input, out=None: -1,
torch.frexp: lambda input, out=None: -1,
torch.full_like: lambda input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
torch.lu_unpack: lambda LU_data, LU_pivots, unpack_data=True, unpack_pivots=True: -1,
torch.gather: lambda input, dim, index, out=None, sparse_grad=False: -1,
torch.gcd: lambda input, other, out=None: -1,
torch.ge: lambda input, other, out=None: -1,
torch.greater_equal: lambda input, other, out=None: -1,
torch.geqrf: lambda input, out=None: -1,
torch.i0: lambda input, out=None: -1,
torch.inner: lambda input, other, out=None: -1,
torch.outer: lambda input, vec2, out=None: -1,
torch.ger: lambda input, vec2, out=None: -1, # alias for torch.outer
torch.gradient: lambda input, spacing=None, dim=None, edge_order=1: -1,
torch.grid_sampler: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.grid_sampler_2d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.grid_sampler_3d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1,
torch.gru: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
torch.gru_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.gt: lambda input, other, out=None: -1,
torch.greater: lambda input, other, out=None: -1,
torch.hardshrink: lambda input, lambd=0.5: -1,
torch.heaviside: lambda input, values, out=None: -1,
torch.hinge_embedding_loss: lambda input, target, margin=1.0, size_average=None, reduce=None, reduction='mean': -1,
torch.histc: lambda input, bins=100, min=0, max=0, out=None: -1,
torch.histogram: lambda input, bins=100, min=None, max=None, weight=None, density=False, out=None: -1,
torch.histogramdd: lambda input, bins, range=None, weight=None, density=False: -1,
torch.linalg.householder_product: lambda input, tau: -1,
torch.hspmm: lambda mat1, mat2, out=None: -1,
torch.hsplit: lambda input, indices_or_sections: -1,
torch.hstack: lambda tensors, out=None: -1,
torch.hypot: lambda input, other, out=None: -1,
torch.igamma: lambda input, other, out=None: -1,
torch.igammac: lambda input, other, out=None: -1,
torch.imag: lambda input, out=None: -1,
torch.index_add: lambda input, dim, index, source: -1,
torch.index_copy: lambda input, dim, index, source: -1,
torch.index_put: lambda input, indices, values, accumulate=False: -1,
torch.index_select: lambda input, dim, index, out=None: -1,
torch.index_fill: lambda input, dim, index, value: -1,
torch.index_reduce: lambda input, dim, index, source, reduce, include_input=True: -1,
torch.isfinite: lambda tensor: -1,
torch.isin: lambda e, te, assume_unique=False, invert=False: -1,
torch.isinf: lambda tensor: -1,
torch.isreal: lambda tensor: -1,
torch.isposinf: lambda input, out=None: -1,
torch.isneginf: lambda input, out=None: -1,
torch.instance_norm: (lambda input, running_mean, running_var, weight, bias, use_input_stats, momentum, eps,
cudnn_enabled: -1),
torch.int_repr: lambda input: -1,
torch.inverse: lambda input, out=None: -1,
torch.linalg.inv: lambda input, out=None: -1,
torch.linalg.inv_ex: lambda input, check_errors=False, out=None: -1,
torch.is_complex: lambda input: -1,
torch.is_conj: lambda input: -1,
torch.is_neg: lambda input: -1,
torch.is_distributed: lambda input: -1,
torch.is_inference: lambda input: -1,
torch.is_floating_point: lambda input: -1,
torch.is_nonzero: lambda input: -1,
torch.is_same_size: lambda input, other: -1,
torch.is_signed: lambda input: -1,
torch.isclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
torch.isnan: lambda input: -1,
torch.istft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
normalized=False, onesided=None, length=None, return_complex=False: -1),
torch.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
torch.kron: lambda input, other: -1,
torch.kthvalue: lambda input, k, dim=None, keepdim=False, out=None: -1,
torch.linalg.ldl_factor_ex: lambda input, hermitian=False, check_errors=False, out=None: -1,
torch.linalg.ldl_factor: lambda input, hermitian=False, out=None: -1,
torch.linalg.ldl_solve: lambda LD, pivots, B, hermitian=False, out=None: -1,
torch.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1,
torch.lcm: lambda input, other, out=None: -1,
torch.ldexp: lambda input, other, out=None: -1,
torch.le: lambda input, other, out=None: -1,
torch.less_equal: lambda input, other, out=None: -1,
torch.lerp: lambda input, end, weight, out=None: -1,
torch.lgamma: lambda input, out=None: -1,
torch.lobpcg: lambda input, k=None, B=None, X=None, n=None, iK=None, niter=None, tol=None, largest=None, method=None,
tracker=None, ortho_iparams=None, ortho_fparams=None, ortho_bparams=None: -1,
torch.log: lambda input, out=None: -1,
torch.log_softmax: lambda input, dim, dtype=None: -1,
torch.log10: lambda input, out=None: -1,
torch.log1p: lambda input, out=None: -1,
torch.log2: lambda input, out=None: -1,
torch.logaddexp: lambda input, other, out=None: -1,
torch.logaddexp2: lambda input, other, out=None: -1,
torch.logdet: lambda input: -1,
torch.xlogy: lambda x, y, out=None: -1,
torch.logical_and: lambda input, other, out=None: -1,
torch.logical_not: lambda input, out=None: -1,
torch.logical_or: lambda input, other, out=None: -1,
torch.logical_xor: lambda input, other, out=None: -1,
torch.logsumexp: lambda input, names, keepdim=False, out=None: -1,
torch.logit: lambda input, eps=None: -1,
torch.lstm: lambda data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional: -1,
torch.lstm_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.lstsq: lambda input, A, out=None: -1,
torch.lt: lambda input, other, out=None: -1,
torch.less: lambda input, other, out=None: -1,
torch.lu: lambda A, pivot=True, get_infos=False, out=None: -1,
torch.lu_solve: lambda b, LU_data, LU_pivots, out=None: -1,
torch.margin_ranking_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1, # type: ignore[attr-defined] # noqa: B950
torch.masked_fill: lambda input, mask, value: -1,
torch.masked_scatter: lambda input, mask, source: -1,
torch.masked_select: lambda input, mask, out=None: -1,
torch.matmul: lambda input, other, out=None: -1,
torch.linalg.lu: lambda input, pivot=True, out=None: -1,
torch.linalg.lu_factor: lambda input, pivot=True, out=None: -1,
torch.linalg.lu_factor_ex: lambda input, pivot=True, check_errors=False, out=None: -1,
torch.linalg.lu_solve: lambda LU, pivots, B, left=True, adjoint=False, out=None: -1,
torch.linalg.matmul: lambda input, other, out=None: -1, # alias for torch.matmul
torch.matrix_power: lambda input, n: -1,
torch.linalg.matrix_power: lambda input, n, out=None: -1,
torch.matrix_rank: lambda input, tol=None, symmetric=False: -1,
torch.linalg.matrix_rank: lambda input, tol=None, hermitian=False: -1,
torch.linalg.multi_dot: lambda tensors, out=None: -1,
torch.matrix_exp: lambda input: -1,
torch.linalg.matrix_exp: lambda input: -1,
torch.max: lambda input, out=None: -1,
torch.maximum: lambda input, other, out=None: -1,
torch.fmax: lambda input, other, out=None: -1,
torch.max_pool1d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool2d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool3d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.mean: lambda input, dim=None: -1,
torch.nanmean: lambda input, dim=None, keepdim=False, dtype=None, out=None: -1,
torch.median: lambda input, dim=None: -1,
torch.nanmedian: lambda input, dim=None: -1,
torch.meshgrid: lambda *tensors, **kwargs: -1,
torch.min: lambda input, out=None: -1,
torch.minimum: lambda input, other, out=None: -1,
torch.fmin: lambda input, other, out=None: -1,
torch.miopen_batch_norm: (lambda input, weight, bias, running_mean, running_var, training,
exponential_average_factor, epsilon: -1),
torch.miopen_convolution: lambda input, weight, bias, padding, stride, dilation, groups, benchmark, deterministic: -1,
torch.miopen_convolution_transpose: (lambda input, weight, bias, padding, output_padding, stride, dilation,
groups, benchmark, deterministic: -1),
torch.miopen_depthwise_convolution: (lambda input, weight, bias, padding, stride, dilation, groups, benchmark,
deterministic: -1),
torch.miopen_rnn: (lambda input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first,
dropout, train, bidirectional, batch_sizes, dropout_state: -1),
torch.mm: lambda input, mat2, out=None: -1,
torch.mode: lambda input, dim=-1, keepdim=False, out=None: -1,
torch.movedim: lambda input, source, destination: -1,
torch.moveaxis: lambda input, source, destination: -1,
torch.msort: lambda input, descending=False, out=None: -1,
torch.mul: lambda input, other, out=None: -1,
torch.multiply: lambda input, other, out=None: -1,
torch.multinomial: lambda input, num_samples, replacement=False, out=None: -1,
torch.mv: lambda input, vec, out=None: -1,
torch.mvlgamma: lambda input, p: -1,
torch.narrow: lambda input, dim, start, length: -1,
torch.narrow_copy: lambda input, dim, start, length: -1,
torch.nan_to_num: lambda input, nan=0.0, posinf=None, neginf=None, out=None: -1,
torch.native_batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps: -1,
torch.native_dropout: lambda input, p, train: -1,
torch.native_layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
torch.native_group_norm: lambda input, weight, bias, N, C, HxW, group, eps: -1,
torch.native_norm: lambda input, p=2, dim=None, keepdim=False, dtype=None: -1,
torch.native_channel_shuffle: lambda input, groups: -1,
torch.ne: lambda input, other, out=None: -1,
torch.not_equal: lambda input, other, out=None: -1,
torch.neg: lambda input, out=None: -1,
torch.negative: lambda input, out=None: -1,
torch.nextafter: lambda input, other, out=None: -1,
torch.nn.functional.adaptive_avg_pool2d: lambda input, output_size: -1,
torch.nn.functional.adaptive_avg_pool3d: lambda input, output_size: -1,
torch.nn.functional.adaptive_max_pool1d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool1d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool2d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool2d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool3d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool3d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.affine_grid: lambda theta, size, align_corners=None: -1,
torch.nn.functional.alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
torch.nn.functional.avg_pool2d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None: -1),
torch.nn.functional.avg_pool3d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None: -1),
torch.nn.functional.batch_norm: (lambda input, running_mean, running_var, weight=None, bias=None, training=False,
momentum=0.1, eps=1e-05: -1),
torch.nn.functional.bilinear: lambda input1, input2, weight, bias=None: -1,
torch.nn.functional.binary_cross_entropy: (lambda input, target, weight=None, size_average=None, reduce=None,
reduction="mean": -1),
torch.nn.functional.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None,
reduce=None, reduction="mean", pos_weight=None: -1),
torch.nn.functional.celu: lambda input, alpha=1.0, inplace=False: -1,
torch.nn.functional.cosine_embedding_loss: (lambda input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.cross_entropy: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
reduce=None, reduction="mean", label_smoothing=0.0: -1),
torch.nn.functional.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0,
reduction='mean', zero_infinity=False: -1),
torch.nn.functional.dropout: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.dropout1d: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.dropout2d: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.dropout3d: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.elu: lambda input, alpha=1.0, inplace=False: -1,
torch.nn.functional.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0,
scale_grad_by_freq=False, sparse=False: -1),
torch.nn.functional.embedding_bag: (lambda input, weight, offsets=None, max_norm=None, norm_type=2,
scale_grad_by_freq=False, mode='mean', sparse=False, per_sample_weights=None,
include_last_offset=False, padding_idx=None: -1),
torch.nn.functional.feature_alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
torch.nn.functional.fold: lambda input, output_size, kernel_size, dilation=1, padding=0, stride=1: -1,
torch.nn.functional.fractional_max_pool2d: (lambda input, kernel_size, output_size=None, output_ratio=None,
return_indices=False, _random_samples=None: -1),
torch.nn.functional.fractional_max_pool2d_with_indices: (
lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False,
_random_samples=None: -1),
torch.nn.functional.fractional_max_pool3d: (lambda input, kernel_size, output_size=None, output_ratio=None,
return_indices=False, _random_samples=None: -1),
torch.nn.functional.fractional_max_pool3d_with_indices: (
lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False,
_random_samples=None: -1),
torch.nn.functional.gaussian_nll_loss: lambda input, target, var, full=False, eps=1e-06, reduction='mean': -1,
torch.nn.functional.gelu: lambda input, approximate='none': -1,
torch.nn.functional.glu: lambda input, dim=-1: -1,
torch.nn.functional.grid_sample: lambda input, grid, mode='bilinear', padding_mode='zeros', align_corners=None: -1,
torch.nn.functional.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05: -1,
torch.nn.functional.gumbel_softmax: lambda logits, tau=1, hard=False, eps=1e-10, dim=-1: -1,
torch.nn.functional.hardshrink: lambda input, lambd=0.5: -1,
torch.nn.functional.hardtanh: lambda input, min_val=-1., max_val=1., inplace=False: -1,
torch.nn.functional.hinge_embedding_loss: (lambda input, target, margin=1.0, size_average=None, reduce=None,
reduction='mean': -1),
torch.nn.functional.instance_norm: (lambda input, running_mean=None, running_var=None, weight=None, bias=None,
use_input_stats=True, momentum=0.1, eps=1e-05: -1),
torch.nn.functional.interpolate: (lambda input, size=None, scale_factor=None, mode='nearest', align_corners=None,
recompute_scale_factor=None, antialias=False: -1),
torch.nn.functional.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
torch.nn.functional.l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
torch.nn.functional.leaky_relu: lambda input, negative_slope=0.01, inplace=False: -1,
torch.nn.functional.linear: lambda input, weight, bias=None: -1,
torch.nn.functional.local_response_norm: lambda input, size, alpha=0.0001, beta=0.75, k=1.0: -1,
torch.nn.functional.log_softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.logsigmoid: lambda input: -1,
torch.nn.functional.lp_pool1d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
torch.nn.functional.lp_pool2d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
torch.nn.functional.margin_ranking_loss: (lambda input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.max_pool1d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False: -1),
torch.nn.functional.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool2d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False: -1),
torch.nn.functional.max_pool2d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool3d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool3d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_unpool1d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.max_unpool2d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.max_unpool3d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.mse_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.multi_head_attention_forward: (
lambda query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v,
add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training=True, key_padding_mask=None,
need_weights=True, attn_mask=None, use_separate_proj_weight=False, q_proj_weight=None, k_proj_weight=None,
v_proj_weight=None, static_k=None, static_v=None, average_attn_weights=None: -1),
torch.nn.functional.multi_margin_loss: (lambda input, target, p=1, margin=1.0, weight=None, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.multilabel_margin_loss: (lambda input, target, size_average=None, reduce=None,
reduction='mean': -1),
torch.nn.functional.multilabel_soft_margin_loss: (lambda input, target, weight=None, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.nll_loss: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
reduce=None, reduction='mean': -1),
torch.nn.functional.normalize: lambda input, p=2, dim=1, eps=1e-12, out=None: -1,
torch.nn.functional.one_hot: lambda tensor, num_classes=-1: -1,
torch.nn.functional.pad: lambda input, pad, mode='constant', value=0: -1,
torch.nn.functional.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
torch.nn.functional.poisson_nll_loss: (lambda input, target, log_input=True, full=False, size_average=None,
eps=1e-08, reduce=None, reduction='mean': -1),
torch.nn.functional.prelu: lambda input, weight: -1,
torch.nn.functional.relu: lambda input, inplace=False: -1,
torch.nn.functional.relu6: lambda input, inplace=False: -1,
torch.nn.functional.rrelu: lambda input, lower=0.125, upper=0.3333333333333333, training=False, inplace=False: -1,
torch.nn.functional.selu: lambda input, inplace=False: -1,
torch.nn.functional.silu: lambda input, inplace=False: -1,
torch.nn.functional.mish: lambda input, inplace=False: -1,
torch.nn.functional.smooth_l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean', beta=1.: -1,
torch.nn.functional.huber_loss: lambda input, target, reduction='mean', delta=1.: -1,
torch.nn.functional.soft_margin_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.softmin: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.softplus: lambda input, beta=1, threshold=20: -1,
torch.nn.functional.softshrink: lambda input, lambd=0.5: -1,
torch.nn.functional.softsign: lambda input: -1,
torch.nn.functional.tanhshrink: lambda input: -1,
torch.nn.functional.threshold: lambda input, threshold, value, inplace=False: -1,
torch.nn.functional.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06,
swap=False, size_average=None, reduce=None, reduction='mean': -1),
torch.nn.functional.triplet_margin_with_distance_loss: (lambda anchor, positive, negative, *,
distance_function=None, margin=1.0,
swap=False, reduction='mean': -1),
torch.nn.functional.unfold: lambda input, kernel_size, dilation=1, padding=0, stride=1: -1,
torch.nn.init.uniform_: lambda tensor, a=0., b=1.: -1,
torch.nn.init.constant_: lambda tensor, val: -1,
torch.nn.init.normal_: lambda tensor, mean=0., std=1.: -1,
torch.nn.init.kaiming_uniform_: lambda tensor, a=0, mode='fan_in', nonlinearity='leaky_relu': -1,
torch.nonzero: lambda input, as_tuple=False: -1,
torch.argwhere: lambda input: -1,
torch.norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.norm: lambda input, ord=None, dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.vector_norm: lambda input, ord=2, dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.matrix_norm: lambda input, ord='fro', dim=(-2, -1), keepdim=False, out=None, dtype=None: -1,
torch.norm_except_dim: lambda v, pow=2, dim=0: -1,
torch.nuclear_norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
torch.numel: lambda input: -1,
torch.orgqr: lambda input, tau: -1,
torch.ormqr: lambda input, input2, input3, left=True, transpose=False: -1,
torch.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
torch.permute: lambda self, dim: -1,
torch.pca_lowrank: lambda input, q=None, center=True, niter=2: -1,
torch.pdist: lambda input, p=2: -1,
torch.pinverse: lambda input, rcond=1e-15: -1,
torch.linalg.pinv: lambda input, rcond=1e-15, hermitian=False: -1,
torch.pixel_shuffle: lambda input, upscale_factor: -1,
torch.pixel_unshuffle: lambda input, downscale_factor: -1,
torch.poisson: lambda input, generator=None: -1,
torch.poisson_nll_loss: lambda input, target, log_input, full, eps, reduction: -1,
torch.polygamma: lambda input, n, out=None: -1,
torch.positive: lambda input, out=None: -1,
torch.prelu: lambda input, weight: -1,
torch.ones_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.pow: lambda input, exponent, out=None: -1,
torch.prod: lambda input, dtype=None: -1,
torch.put: lambda input, index, source, accumulate=False: -1,
torch.q_per_channel_axis: lambda input: -1,
torch.q_per_channel_scales: lambda input: -1,
torch.q_per_channel_zero_points: lambda input: -1,
torch.q_scale: lambda input: -1,
torch.q_zero_point: lambda input: -1,
torch.qr: lambda input, some=True, out=None: -1,
torch.linalg.qr: lambda input, mode='reduced', out=None: -1,
torch.quantile: lambda input, q, dim=None, keepdim=False, interpolation='linear', out=None: -1,
torch.nanquantile: lambda input, q, dim=None, keepdim=False, interpolation='linear', out=None: -1,
torch.quantize_per_channel: lambda input, scales, zero_points, axis, dtype: -1,
torch.quantize_per_tensor: lambda input, scale, zero_point, dtype: -1,
torch.quantize_per_tensor_dynamic: lambda input, dtype, reduce_range: -1,
torch.quantized_batch_norm: lambda input, weight, bias, mean, var, eps, output_scale, output_zero_point: -1,
torch.quantized_gru_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_lstm_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_max_pool1d: (lambda input, kernel_size, stride=tuple(), padding=(0,),
dilation=(1,), ceil_mode=False: -1),
torch.quantized_max_pool2d: (lambda input, kernel_size, stride=tuple(), padding=(0, 0),
dilation=(1, 1), ceil_mode=False: -1),
torch.quantized_rnn_relu_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_rnn_tanh_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.rad2deg: lambda input, out=None: -1,
torch.rand_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.randint_like: lambda input, high, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
torch.randn_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.ravel: lambda input: -1,
torch.real: lambda input, out=None: -1,
torch.vdot: lambda input, other, out=None: -1,
torch.linalg.vecdot: lambda input, other, dim=-1, out=None: -1,
torch.view_as_real: lambda input: -1,
torch.view_as_complex: lambda input: -1,
torch.reciprocal: lambda input, out=None: -1,
torch.relu: lambda input, inplace=False: -1,
torch.remainder: lambda input, other, out=None: -1,
torch.renorm: lambda input, p, dim, maxnorm, out=None: -1,
torch.repeat_interleave: lambda input, dim=None: -1,
torch.reshape: lambda input, shape: -1,
torch.rnn_relu: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
torch.rnn_relu_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.rnn_tanh: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
torch.rnn_tanh_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.roll: lambda input, shifts, dims=None: -1,
torch.rot90: lambda input, k=1, dims=(0, 1): -1,
torch.round: lambda input, out=None: -1,
torch.row_stack: lambda tensors, out=None: -1, # alias for torch.vstack
torch._rowwise_prune: (lambda weight, mask, compressed_indices_dtype: -1),
torch.rrelu: lambda input, lower=1. / 8, upper=1. / 3, training=False, inplace=False: -1,
torch.rsqrt: lambda input, out=None: -1,
torch.rsub: lambda input, other, alpha=1: -1,
torch.saddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.scatter: lambda input, dim, index, src: -1,
torch.scatter_add: lambda input, dim, index, src: -1,
torch.scatter_reduce: lambda input, dim, index, src, reduce, include_self=True: -1,
torch.searchsorted: lambda sorted_sequence, input, out_int32=False, right=False, out=None: -1,
torch.segment_reduce: lambda data, reduce="max", lengths=None, indices=None, offsets=None, axis=0, unsafe=False: -1,
torch.select: lambda input, dim, index: -1,
torch.select_scatter: lambda input, src, dim, index: -1,
torch.slice_scatter: lambda input, src, dim=0, start=None, end=None, step=1: -1,
torch.selu: lambda input, inplace=False: -1,
torch.sigmoid: lambda input, out=None: -1,
torch.sign: lambda input, out=None: -1,
torch.signbit: lambda input, out=None: -1,
torch.sgn: lambda input, out=None: -1,
torch.sin: lambda input, out=None: -1,
torch.sinc: lambda input, out=None: -1,
torch.sinh: lambda input, out=None: -1,
torch.slogdet: lambda input: -1,
torch.linalg.slogdet: lambda input: -1,
torch.smm: lambda input, mat2: -1,
torch.spmm: lambda input, mat2: -1,
torch.softmax: lambda input, dim, dtype=None: -1,
torch.linalg.solve: lambda A, B, left=True, out=None: -1,
torch.linalg.solve_ex: lambda A, B, left=True, check_errors=False, out=None: -1,
torch.sort: lambda input, dim=-1, descending=False, *, stable=False, out=None: -1,
torch.split: lambda tensor, split_size_or_sections, dim=0: -1,
torch.split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
torch.sqrt: lambda input, out=None: -1,
torch.square: lambda input, out=None: -1,
torch.squeeze: lambda input, dim=None, out=None: -1,
torch.sspaddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.stack: lambda tensors, dim=0, out=None: -1,
torch.std: lambda input, dim=None: -1,
torch.std_mean: lambda input, dim=None: -1,
torch.stft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
pad_mode='reflect', normalized=False, onesided=True, return_complex=None: -1),
torch.sub: lambda input, other, out=None: -1,
torch.subtract: lambda input, other, out=None: -1,
torch.sum: lambda input, dim=None: -1,
torch.nansum: lambda input, dim=None: -1,
torch.svd: lambda input, some=True, compute_uv=True, out=None: -1,
torch.svd_lowrank: lambda input, q=6, niter=2, M=None: -1,
torch.linalg.svd: lambda input, full_matrices=True, out=None: -1,
torch.linalg.svdvals: lambda input, out=None: -1,
torch.symeig: lambda input, eigenvectors=False, upper=True, out=None: -1,
torch.swapaxes: lambda input, dim0, dim1: -1,
torch.swapdims: lambda input, axis0, axis1: -1,
torch.special.airy_ai: lambda input: -1,
torch.special.bessel_j0: lambda input: -1,
torch.special.bessel_j1: lambda input: -1,
torch.special.bessel_y0: lambda input: -1,
torch.special.bessel_y1: lambda input: -1,
torch.special.chebyshev_polynomial_t: lambda input, n, out=None: -1,
torch.special.chebyshev_polynomial_u: lambda input, n, out=None: -1,
torch.special.chebyshev_polynomial_v: lambda input, n, out=None: -1,
torch.special.chebyshev_polynomial_w: lambda input, n, out=None: -1,
torch.special.digamma: lambda input: -1,
torch.special.entr: lambda input: -1,
torch.special.erf: lambda input: -1,
torch.special.erfc: lambda input: -1,
torch.special.erfcx: lambda input: -1,
torch.special.erfinv: lambda input: -1,
torch.special.exp2: lambda input: -1,
torch.special.expit: lambda input: -1,
torch.special.expm1: lambda input: -1,
torch.special.gammainc: lambda input, other, out=None: -1,
torch.special.gammaincc: lambda input, other, out=None: -1,
torch.special.gammaln: lambda input: -1,
torch.special.hermite_polynomial_h: lambda input, n, out=None: -1,
torch.special.hermite_polynomial_he: lambda input, n, out=None: -1,
torch.special.i0: lambda input: -1,
torch.special.i0e: lambda input: -1,
torch.special.i1: lambda input: -1,
torch.special.i1e: lambda input: -1,
torch.special.laguerre_polynomial_l: lambda input, n, out=None: -1,
torch.special.legendre_polynomial_p: lambda input, n, out=None: -1,
torch.special.log1p: lambda input: -1,
torch.special.log_ndtr: lambda input: -1,
torch.special.log_softmax: lambda input, dim, dtype=None: -1,
torch.special.logit: lambda input: -1,
torch.special.logsumexp: lambda input, dim, keepdim=False, out=None: -1,
torch.special.modified_bessel_i0: lambda input: -1,
torch.special.modified_bessel_i1: lambda input: -1,
torch.special.modified_bessel_k0: lambda input: -1,
torch.special.modified_bessel_k1: lambda input: -1,
torch.special.multigammaln: lambda input, p: -1,
torch.special.ndtr: lambda input: -1,
torch.special.ndtri: lambda input: -1,
torch.special.polygamma: lambda input, n, out=None: -1,
torch.special.psi: lambda input: -1,
torch.special.round: lambda input: -1,
torch.special.scaled_modified_bessel_k0: lambda input: -1,
torch.special.scaled_modified_bessel_k1: lambda input: -1,
torch.special.shifted_chebyshev_polynomial_t: lambda input, n, out=None: -1,
torch.special.shifted_chebyshev_polynomial_u: lambda input, n, out=None: -1,
torch.special.shifted_chebyshev_polynomial_v: lambda input, n, out=None: -1,
torch.special.shifted_chebyshev_polynomial_w: lambda input, n, out=None: -1,
torch.special.sinc: lambda input: -1,
torch.special.softmax: lambda input, dim, dtype=None: -1,
torch.special.spherical_bessel_j0: lambda input: -1,
torch.special.xlog1py: lambda input, other, out=None: -1,
torch.special.xlogy: lambda input, other, out=None: -1,
torch.special.zeta: lambda self, other, out=None: -1,
torch.t: lambda input: -1,
torch.take: lambda input, index: -1,
torch.take_along_dim: lambda input, indices, dim=None, out=None: -1,
torch.tan: lambda input, out=None: -1,
torch.tanh: lambda input, out=None: -1,
torch.linalg.tensorinv: lambda a, ind=2: -1,
torch.linalg.tensorsolve: lambda a, b, dims=None: -1,
torch.tensordot: lambda a, b, dims=2, out=None: -1,
torch.tensor_split: lambda input, indices_or_sections, dim=0: -1,
torch.threshold: lambda input, threshold, value, inplace=False: -1,
torch.tile: lambda input, dims: -1,
torch.topk: lambda input, k, dim=-1, descending=False, out=None: -1,
torch.trace: lambda input: -1,
torch.transpose: lambda input, dim0, dim1: -1,
torch.trapz: lambda y, x=None, dim=-1: -1,
torch.trapezoid: lambda y, x=None, dim=-1: -1,
torch.triangular_solve: lambda input, A, upper=True, transpose=False, unitriangular=False: -1,
torch.linalg.solve_triangular: lambda input, B, upper, left=True, unitriangular=False: -1,
torch.tril: lambda input, diagonal=0, out=None: -1,
torch.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06, swap=False,
size_average=None, reduce=None, reduction='mean': -1),
torch.triu: lambda input, diagonal=0, out=None: -1,
torch.true_divide: lambda input, other: -1,
torch.trunc: lambda input, out=None: -1,
torch.unbind: lambda input, dim=0: -1,
torch.unflatten: lambda input, dim, sizes, names: -1,
torch.unique: lambda input, sorted=True, return_inverse=False, return_counts=False, dim=None: -1,
torch.unique_consecutive: lambda input, return_inverse=False, return_counts=False, dim=None: -1,
torch.unsafe_chunk: lambda input, chunks, dim=0: -1,
torch.unsafe_split: lambda tensor, split_size_or_sections, dim=0: -1,
torch.unsafe_split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
torch.unsqueeze: lambda input, dim, out=None: -1,
torch.linalg.vander: lambda x, N=None: -1,
torch.var: lambda input, dim=None: -1,
torch.var_mean: lambda input, dim=None: -1,
torch.vsplit: lambda input, indices_or_sections: -1,
torch.vstack: lambda tensors, out=None: -1,
torch.where: lambda condition, x=None, y=None: -1,
torch.zeros_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch._fw_primal_copy: lambda self, level: -1,
torch._make_dual_copy: lambda primal, tangent, level: -1,
torch.view_as_real_copy: lambda self: -1,
torch.view_as_complex_copy: lambda self: -1,
torch._conj_copy: lambda self: -1,
torch._neg_view_copy: lambda self: -1,
torch.as_strided_copy: lambda self, size, stride, storage_offset=None: -1,
torch._sparse_broadcast_to_copy: lambda self, size: -1,
torch.diagonal_copy: lambda self, offset=0, dim1=0, dim2=1: -1,
torch.expand_copy: lambda self, size, *, implicit=False: -1,
torch.narrow_copy: lambda self, dim, start, length: -1,
torch.permute_copy: lambda self, dims: -1,
torch._reshape_alias_copy: lambda self, size, stride: -1,
torch.select_copy: lambda self, dim, index: -1,
torch.detach_copy: lambda self: -1,
torch.slice_copy: lambda self, dim=0, start=None, end=None, step=1: -1,
torch.split_copy: lambda self, split_size, dim=0: -1,
torch.split_with_sizes_copy: lambda self, split_sizes, dim=0: -1,
torch.squeeze_copy: lambda self: -1,
torch.squeeze_copy: lambda self, dim: -1,
torch.t_copy: lambda self: -1,
torch.transpose_copy: lambda self, dim0, dim1: -1,
torch.unsqueeze_copy: lambda self, dim: -1,
torch._indices_copy: lambda self: -1,
torch._values_copy: lambda self: -1,
torch.indices_copy: lambda self: -1,
torch.values_copy: lambda self: -1,
torch.crow_indices_copy: lambda self: -1,
torch.col_indices_copy: lambda self: -1,
torch.ccol_indices_copy: lambda self: -1,
torch.row_indices_copy: lambda self: -1,
torch.unbind_copy: lambda self, dim=0: -1,
torch.view_copy: lambda self, size: -1,
torch.view_copy: lambda self, dtype: -1,
torch.unfold_copy: lambda self, dimension, size, step: -1,
torch.alias_copy: lambda self: -1,
Tensor.__floordiv__: lambda self, other: -1,
Tensor.__rfloordiv__: lambda self, other: -1,
Tensor.__ifloordiv__: lambda self, other: -1,
Tensor.__truediv__: lambda self, other: -1,
Tensor.__rtruediv__: lambda self, other: -1,
Tensor.__itruediv__: lambda self, other: -1,
Tensor.__lshift__: lambda self, other: -1,
Tensor.__rlshift__: lambda self, other: -1,
Tensor.__ilshift__: lambda self, other: -1,
Tensor.__rshift__: lambda self, other: -1,
Tensor.__rrshift__: lambda self, other: -1,
Tensor.__irshift__: lambda self, other: -1,
Tensor.__and__: lambda self, other: -1,
Tensor.__or__: lambda self, other: -1,
Tensor.__xor__: lambda self, other: -1,
Tensor.__float__: lambda self: -1,
Tensor.__complex__: lambda self: -1,
Tensor.__array__: lambda self, dtype: -1,
Tensor.__bool__: lambda self: -1,
Tensor.__contains__: lambda self, other: -1,
Tensor.__neg__: lambda self: -1,
Tensor.__invert__: lambda self: -1,
Tensor.__mod__: lambda self, other: -1,
Tensor.__rmod__: lambda self, other: -1,
Tensor.__imod__: lambda self, other: -1,
Tensor.__array_wrap__: lambda self, array: -1,
Tensor.__getitem__: lambda self, idx: -1,
Tensor.__deepcopy__: lambda self, memo: -1,
Tensor.__int__: lambda self: -1,
Tensor.__long__: lambda self: -1,
Tensor.__index__: lambda self: -1,
Tensor.__len__: lambda self: -1,
Tensor.__format__: lambda self, format_spec: -1,
Tensor.__reduce_ex__: lambda self, proto: -1,
Tensor.__reversed__: lambda self: -1,
Tensor.__repr__: lambda self, *, tensor_contents=None: -1,
Tensor.__setitem__: lambda self, k, v: -1,
Tensor.__setstate__: lambda self, d: -1,
Tensor.T.__get__: lambda self: -1,
Tensor.H.__get__: lambda self: -1,
Tensor.mT.__get__: lambda self: -1,
Tensor.mH.__get__: lambda self: -1,
Tensor._backward_hooks.__get__: lambda self: -1,
Tensor._base.__get__: lambda self: -1,
Tensor._cdata.__get__: lambda self: -1,
Tensor.grad.__get__: lambda self: -1,
Tensor._grad.__get__: lambda self: -1,
Tensor._grad_fn.__get__: lambda self: -1,
Tensor.grad_fn.__get__: lambda self: -1,
Tensor._version.__get__: lambda self: -1,
Tensor._autocast_to_reduced_precision: lambda self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype: -1,
Tensor._autocast_to_full_precision: lambda self, cuda_enabled, cpu_enabled: -1,
Tensor.data.__get__: lambda self: -1,
Tensor.device.__get__: lambda self: -1,
Tensor.dtype.__get__: lambda self: -1,
Tensor.is_cuda.__get__: lambda self: -1,
Tensor.is_cpu.__get__: lambda self: -1,
Tensor.is_xpu.__get__: lambda self: -1,
Tensor.is_ipu.__get__: lambda self: -1,
Tensor.is_leaf.__get__: lambda self: -1,
Tensor.retains_grad.__get__: lambda self: -1,
Tensor.is_meta.__get__: lambda self: -1,
Tensor.is_mps.__get__: lambda self: -1,
Tensor.is_nested.__get__: lambda self: -1,
Tensor.is_ort.__get__: lambda self: -1,
Tensor.is_mkldnn.__get__: lambda self: -1,
Tensor.is_quantized.__get__: lambda self: -1,
Tensor.is_sparse.__get__: lambda self: -1,
Tensor.is_sparse_csr.__get__: lambda self: -1,
Tensor.is_vulkan.__get__: lambda self: -1,
Tensor.layout.__get__: lambda self: -1,
Tensor.name.__get__: lambda self: -1,
Tensor.names.__get__: lambda self: -1,
Tensor.ndim.__get__: lambda self: -1,
Tensor.output_nr.__get__: lambda self: -1,
Tensor.requires_grad.__get__: lambda self: -1,
Tensor.shape.__get__: lambda self: -1,
Tensor.volatile.__get__: lambda self: -1,
Tensor.real.__get__: lambda self: -1,
Tensor.imag.__get__: lambda self: -1,
Tensor.__cuda_array_interface__.__get__: lambda self: -1,
Tensor.type: lambda self, dtype=None, non_blocking=False, **kwargs: -1,
Tensor._coalesced_: lambda self: -1,
Tensor._dimI: lambda self: -1,
Tensor._dimV: lambda self: -1,
Tensor._indices: lambda self: -1,
Tensor._is_view: lambda self: -1,
Tensor._nnz: lambda self: -1,
Tensor.crow_indices: lambda self: -1,
Tensor.col_indices: lambda self: -1,
Tensor.ccol_indices: lambda self: -1,
Tensor.row_indices: lambda self: -1,
Tensor._update_names: lambda self, names, inplace: -1,
Tensor._values: lambda self: -1,
Tensor.adjoint: lambda self: -1,
Tensor.align_as: lambda self, other: -1,
Tensor.align_to: lambda self, order, ellipsis_idx: -1,
Tensor.apply_: lambda self, callable: -1,
Tensor.as_strided: lambda self, size, stride: -1,
Tensor.as_strided_: lambda self, size, stride: -1,
Tensor.backward: lambda self, gradient=None, retain_graph=None, create_graph=False, inputs=None: -1,
Tensor.bfloat16: lambda self, memory_format=torch.preserve_format: -1,
Tensor.bool: lambda self, memory_format=torch.preserve_format: -1,
Tensor.byte: lambda self, memory_format=torch.preserve_format: -1,
Tensor.char: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cauchy_: lambda self, median=0, sigma=1, *, generator=None: -1,
Tensor.coalesce: lambda self: -1,
Tensor._coalesced_: lambda self, coalesced: -1,
Tensor.contiguous: lambda self, memory_format=torch.contiguous_format: -1,
Tensor.copy_: lambda self, src, non_blocking=False: -1,
Tensor.cpu: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cuda: lambda self, memory_format=torch.preserve_format: -1,
Tensor.xpu: lambda self, memory_format=torch.preserve_format: -1,
Tensor.ipu: lambda self, memory_format=torch.preserve_format: -1,
Tensor.data_ptr: lambda self: -1,
Tensor.dense_dim: lambda self: -1,
Tensor.diagonal_scatter: lambda self, src, offset=0, dim1=0, dim2=1: -1,
Tensor.dim: lambda self: -1,
Tensor.double: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cdouble: lambda self, memory_format=torch.preserve_format: -1,
Tensor.element_size: lambda self: -1,
Tensor.expand: lambda self, size: -1,
Tensor.expand_as: lambda self, other: -1,
Tensor.exponential_: lambda self, lambd=1, *, generator=None: -1,
Tensor.fill_: lambda self, value: -1,
Tensor.fill_diagonal_: lambda self, value: -1,
Tensor.float: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cfloat: lambda self, memory_format=torch.preserve_format: -1,
Tensor.geometric_: lambda self, p, *, generator=None: -1,
Tensor.get_device: lambda self: -1,
Tensor.half: lambda self, memory_format=torch.preserve_format: -1,
Tensor.chalf: lambda self, memory_format=torch.preserve_format: -1,
Tensor.has_names: lambda self: -1,
Tensor.indices: lambda self: -1,
Tensor.int: lambda self, memory_format=torch.preserve_format: -1,
Tensor.is_coalesced: lambda self: -1,
Tensor.is_contiguous: lambda self: -1,
Tensor.is_inference: lambda self: -1,
Tensor.is_pinned: lambda self: -1,
Tensor.is_set_to: lambda self, tensor: -1,
Tensor.is_shared: lambda self: -1,
Tensor.item: lambda self: -1,
Tensor.log_normal_: lambda self, mean=1, std=2, *, generator=None: -1,
Tensor.log_softmax: lambda self, dim: -1,
Tensor.long: lambda self, memory_format=torch.preserve_format: -1,
Tensor.map_: lambda self, tensor, callable: -1,
Tensor.map2_: lambda self, x, y, callable: -1,
Tensor.mm: lambda self, mat2: -1,
Tensor.narrow_copy: lambda self, dimension, start, length: -1,
Tensor.ndimension: lambda self: -1,
Tensor.nelement: lambda self: -1,
Tensor._nested_tensor_size: lambda self: -1,
Tensor.normal_: lambda self: -1,
Tensor.numpy: lambda self: -1,
Tensor.permute: lambda self, dim: -1,
Tensor.pin_memory: lambda self: -1,
Tensor.put_: lambda self, indices, tensor, accumulate=False: -1,
Tensor.qscheme: lambda self: -1,
Tensor.random_: lambda self, from_=0, to=None, *, generator=None: -1,
Tensor.record_stream: lambda self, stream: -1,
Tensor.refine_names: lambda self, names: -1,
Tensor.register_hook: lambda self, hook: -1,
Tensor.rename: lambda self, name: -1,
Tensor.repeat: lambda self, *size: -1,
Tensor.requires_grad_: lambda self, requires_grad=True: -1,
Tensor.reshape_as: lambda self, other: -1,
Tensor.resize: lambda self, *size: -1,
Tensor.resize_: lambda self, size: -1,
Tensor.resize_as: lambda self, other: -1,
Tensor.resize_as_sparse_: lambda self, other: -1,
Tensor.retain_grad: lambda self: -1,
Tensor.set_: lambda self, source=None, storage_offset=0, size=None, stride=None: -1,
Tensor.select_scatter: lambda self, src, dim, index: -1,
Tensor.share_memory_: lambda self: -1,
Tensor.short: lambda self, memory_format=torch.preserve_format: -1,
Tensor.size: lambda self: -1,
Tensor.slice_scatter: lambda self, src, dim=0, start=None, end=None, step=1: -1,
Tensor.sparse_dim: lambda self: -1,
Tensor.sparse_mask: lambda self, mask: -1,
Tensor.sparse_resize_: lambda self, size1, size2, dense_dim: -1,
Tensor.sparse_resize_and_clear_: lambda self, size1, size2, dense_dim: -1,
Tensor.sspaddmm: lambda self, mat1, mat2, beta=1, alpha=1, out=None: -1,
Tensor.storage: lambda self: -1,
Tensor._storage: lambda self: -1,
Tensor.storage_offset: lambda self: -1,
Tensor.storage_type: lambda self: -1,
Tensor.sum_to_size: lambda self, size: -1,
Tensor.tile: lambda self, *reps: -1,
Tensor.to: lambda self, dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format: -1,
Tensor.to_dense: lambda self, dtype=None: -1,
Tensor._to_dense: lambda self, dtype=None: -1,
Tensor.to_sparse: lambda self: -1,
Tensor.tolist: lambda self: -1,
Tensor.to_mkldnn: lambda self: -1,
Tensor.type_as: lambda self, other: -1,
Tensor.unfold: lambda self, dimension, size, step: -1,
Tensor.uniform_: lambda self, from_=0, to=1: -1,
Tensor.values: lambda self: -1,
Tensor.view: lambda self, shape: -1,
Tensor.view_as: lambda self, other: -1,
Tensor.zero_: lambda self: -1,
Tensor.__dlpack__: lambda self, stream=None: -1,
Tensor.__dlpack_device__: lambda self: -1,
torch.linalg.lstsq: lambda self, b, cond=None, driver=None: -1,
}
ret2 = {}
ignored = get_ignored_functions()
for k, v in ret.items():
# Generate methods like __add__ and add_ by default from add
names = [
k.__name__, # Default method
k.__name__ + "_", # Inplace variant
"__" + k.__name__ + "__", # Dunder method
"__i" + k.__name__ + "__", # Inplace dunder method
"__r" + k.__name__ + "__", # Reverse dunder method
]
if k.__name__.startswith("bitwise_"):
# bitwise_<op> have dunder methods of the form __<op>__
# And so on.
subname = k.__name__[len("bitwise_"):]
names.extend([
"__" + subname + "__",
"__i" + subname + "__",
"__r" + subname + "__"
])
for name in names:
func = getattr(Tensor, name, None)
if callable(func) and func not in ret and func not in ignored:
ret2[func] = v
ret.update(ret2)
return ret
def wrap_torch_function(dispatcher: Callable):
"""Wraps a given function with ``__torch_function__`` -related functionality.
Parameters
----------
dispatcher: Callable
A callable that returns an iterable of Tensor-likes passed into the function.
Note
----
This decorator may reduce the performance of your code. Generally, it's enough to express
your code as a series of functions that, themselves, support __torch_function__. If you
find yourself in the rare situation where this is not the case, e.g. if you're wrapping a
low-level library and you also need it to work for Tensor-likes, then this function is available.
Examples
--------
>>> def dispatcher(a): # Must have the same signature as func
... return (a,)
>>> @torch.overrides.wrap_torch_function(dispatcher)
... def func(a): # This will make func dispatchable by __torch_function__
... return a + 0
"""
def inner(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
relevant_args = dispatcher(*args, **kwargs)
if has_torch_function(relevant_args):
return handle_torch_function(wrapped, relevant_args, *args, **kwargs)
return func(*args, **kwargs)
return wrapped
return inner
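# Illustrative sketch (added for clarity; not part of the original module): one way
# ``wrap_torch_function`` lets a plain function participate in ``__torch_function__``
# dispatch. ``_PrintingTensor`` and ``_example_double`` are hypothetical names used
# only for this example.
def _example_wrap_torch_function():
    class _PrintingTensor(torch.Tensor):
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            if kwargs is None:
                kwargs = {}
            print(f"dispatched: {func.__name__}")
            return super().__torch_function__(func, types, args, kwargs)

    @wrap_torch_function(lambda a: (a,))
    def _example_double(a):
        return a * 2

    t = torch.ones(2).as_subclass(_PrintingTensor)
    # Prints "dispatched: _example_double" before computing the result.
    return _example_double(t)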
def _get_overloaded_args(relevant_args: Iterable[Any]) -> List[Any]:
"""Returns a list of arguments on which to call __torch_function__.
Checks arguments in relevant_args for __torch_function__ implementations,
storing references to the arguments and their types in overloaded_args and
overloaded_types in order of calling precedence. Only distinct types are
considered. If a type is a subclass of another type it will have higher
precedence, otherwise the precedence order is the same as the order of
arguments in relevant_args, that is, from left-to-right in the argument list.
The precedence-determining algorithm implemented in this function is
described in `NEP-0018`_.
See torch::append_overloaded_arg for the equivalent function in the C++
implementation.
Parameters
----------
relevant_args : iterable of array-like
Iterable of array-like arguments to check for __torch_function__
methods.
Returns
-------
overloaded_args : list
Arguments from relevant_args on which to call __torch_function__
methods, in the order in which they should be called.
.. _NEP-0018:
https://numpy.org/neps/nep-0018-array-function-protocol.html
"""
# If torch function is not enabled, there are no overloaded types
if not torch._C._is_torch_function_enabled():
return []
# Runtime is O(num_arguments * num_unique_types)
overloaded_types: Set[Type] = set()
overloaded_args: List[Any] = []
for arg in relevant_args:
arg_type = type(arg)
# We only collect arguments if they have a unique type, which ensures
# reasonable performance even with a long list of possibly overloaded
# arguments.
#
# NB: Important to exclude _disabled_torch_function_impl, otherwise
# https://github.com/pytorch/pytorch/issues/64687
if (arg_type not in overloaded_types and hasattr(arg_type, '__torch_function__') and
arg_type.__torch_function__ != torch._C._disabled_torch_function_impl):
# Create lists explicitly for the first type (usually the only one
# done) to avoid setting up the iterator for overloaded_args.
if overloaded_types:
overloaded_types.add(arg_type)
# By default, insert argument at the end, but if it is
# subclass of another argument, insert it before that argument.
# This ensures "subclasses before superclasses".
index = len(overloaded_args)
for i, old_arg in enumerate(overloaded_args):
if issubclass(arg_type, type(old_arg)):
index = i
break
overloaded_args.insert(index, arg)
else:
overloaded_types = {arg_type}
overloaded_args = [arg]
return overloaded_args
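# Illustrative sketch (added for clarity; not part of the original module): the
# precedence rule described above places subclasses ahead of their parent classes,
# regardless of argument order. ``_Base`` and ``_Derived`` are hypothetical classes.
def _example_overloaded_args_precedence():
    class _Base(torch.Tensor):
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            return NotImplemented  # never invoked in this sketch

    class _Derived(_Base):
        pass

    base = torch.ones(1).as_subclass(_Base)
    derived = torch.ones(1).as_subclass(_Derived)
    # ``derived`` is listed first even though ``base`` comes first in the input.
    ordered = _get_overloaded_args([base, derived])
    assert [type(a) for a in ordered] == [_Derived, _Base]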
def handle_torch_function(
public_api: Callable, relevant_args: Iterable[Any], *args, **kwargs) -> Any:
"""Implement a function with checks for ``__torch_function__`` overrides.
See torch::autograd::handle_torch_function for the equivalent of this
function in the C++ implementation.
Arguments
---------
public_api : function
Function exposed by the public torch API originally called like
``public_api(*args, **kwargs)`` on which arguments are now being
checked.
relevant_args : iterable
Iterable of arguments to check for __torch_function__ methods.
args : tuple
Arbitrary positional arguments originally passed into ``public_api``.
kwargs : dict
Arbitrary keyword arguments originally passed into ``public_api``.
Returns
-------
object
Result from calling ``public_api`` or an ``__torch_function__``
method, as appropriate.
Raises
------
TypeError : if no implementation is found.
Example
-------
>>> def func(a):
... if has_torch_function_unary(a):
... return handle_torch_function(func, (a,), a)
... return a + 0
"""
# Check for __torch_function__ methods.
overloaded_args = _get_overloaded_args(relevant_args)
# overloaded_args already have unique types.
types = tuple(map(type, overloaded_args))
# Check for __torch_function__ mode.
mode = _get_torch_function_mode()
if mode is not None:
# NB: unlike on tensors, modes are instances
with _no_torch_function_mode():
result = mode.__torch_function__(public_api, types, args, kwargs)
if result is not NotImplemented:
return result
# Call overrides
for overloaded_arg in overloaded_args:
# This call needs to become a classmethod call in the future.
# See https://github.com/pytorch/pytorch/issues/63767
torch_func_method = overloaded_arg.__torch_function__
if hasattr(torch_func_method, "__self__") and torch_func_method.__self__ is overloaded_arg and \
torch_func_method is not torch._C._disabled_torch_function_impl:
warnings.warn("Defining your `__torch_function__ as a plain method is deprecated and "
"will be an error in future, please define it as a classmethod.",
DeprecationWarning)
# Use `public_api` instead of `implementation` so __torch_function__
# implementations can do equality/identity comparisons.
result = torch_func_method(public_api, types, args, kwargs)
if result is not NotImplemented:
return result
func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
msg = (
"no implementation found for '{}' on types that implement "
'__torch_function__: {}'
).format(func_name, [type(arg) for arg in overloaded_args])
if mode is not None:
msg += f" nor in mode {mode}"
raise TypeError(msg)
has_torch_function = _add_docstr(
_has_torch_function,
r"""Check for __torch_function__ implementations in the elements of an iterable
or if a __torch_function__ mode is enabled. Considers exact ``Tensor`` s
and ``Parameter`` s non-dispatchable. Use this to guard a call to
:func:`handle_torch_function`; don't use it to test if something
is Tensor-like, use :func:`is_tensor_like` instead.
Arguments
---------
relevant_args : iterable
Iterable of arguments to check for __torch_function__ methods.
Returns
-------
bool
True if any of the elements of relevant_args have __torch_function__
implementations, False otherwise.
See Also
--------
torch.is_tensor_like
Checks if something is a Tensor-like, including an exact ``Tensor``.
"""
)
has_torch_function_unary = _add_docstr(
_has_torch_function_unary,
r"""Special case of `has_torch_function` for single inputs.
Instead of:
`has_torch_function((t,))`
call:
`has_torch_function_unary(t)`
which skips unnecessary packing and unpacking work.
"""
)
has_torch_function_variadic = _add_docstr(
_has_torch_function_variadic,
r"""Special case of `has_torch_function` that skips tuple creation.
This uses the METH_FASTCALL protocol introduced in Python 3.7
Instead of:
`has_torch_function((a, b))`
call:
`has_torch_function_variadic(a, b)`
which skips unnecessary packing and unpacking work.
"""
)
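# Illustrative sketch (added for clarity; not part of the original module): the
# standard guard pattern that functions in the torch namespace use with the helpers
# above. ``_example_add`` is a hypothetical function, not a real torch API.
def _example_add(a, b, out=None):
    if has_torch_function_variadic(a, b):
        return handle_torch_function(_example_add, (a, b), a, b, out=out)
    return torch.add(a, b, out=out)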
@functools.lru_cache(None)
def _get_overridable_functions() -> Tuple[Dict[Any, List[Callable]], Dict[Callable, str]]:
overridable_funcs = collections.defaultdict(list)
index = {}
tested_namespaces = [
("torch", torch, torch.__all__ + dir(torch._C._VariableFunctions)),
("torch.functional", torch.functional, torch.functional.__all__),
("torch.nn.functional", torch.nn.functional, dir(torch.nn.functional)),
("torch.nn.init", torch.nn.init, dir(torch.nn.init)),
("torch.Tensor", torch.Tensor, dir(torch.Tensor)),
("torch.linalg", torch.linalg, dir(torch.linalg)),
("torch.fft", torch.fft, dir(torch.fft)),
("torch.special", torch.special, dir(torch.special)),
]
for namespace_str, namespace, ns_funcs in tested_namespaces:
for func_name in ns_funcs:
ignore = False
# ignore private functions or functions that are deleted in torch.__init__
if namespace is not torch.Tensor:
if func_name.startswith('__'):
continue
elif func_name.startswith('_'):
ignore = True
elif func_name.endswith('_'):
ignore = True
elif not func_name[0].islower():
ignore = True
elif func_name == 'unique_dim':
continue
else:
func = getattr(namespace, func_name)
if getattr(object, func_name, None) == func:
continue
if func_name == '__weakref__':
continue
func = getattr(namespace, func_name)
if namespace is torch.Tensor and getattr(object, func_name, None) == func:
continue
# ignore re-exported modules
if isinstance(func, types.ModuleType):
continue
# ignore __future__ imports
if isinstance(func, __future__._Feature):
continue
if not callable(func) and hasattr(func, "__get__"):
index[func.__get__] = f"{namespace_str}.{func_name}.__get__"
index[func.__set__] = f"{namespace_str}.{func_name}.__set__"
if ignore:
continue
if func.__get__ in get_ignored_functions():
msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
"but still has an explicit override")
assert func.__get__ not in get_testing_overrides(), msg.format(namespace, func.__name__)
continue
else:
overridable_funcs[func].append(func.__get__)
continue
if not callable(func):
continue
index[func] = f"{namespace_str}.{func_name}"
if ignore:
continue
# cannot be overridden by __torch_function__
if func in get_ignored_functions():
msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
"but still has an explicit override")
assert func not in get_testing_overrides(), msg.format(namespace, func.__name__)
continue
overridable_funcs[namespace].append(func)
return overridable_funcs, index
def get_overridable_functions() -> Dict[Any, List[Callable]]:
"""List functions that are overridable via __torch_function__
Returns
-------
Dict[Any, List[Callable]]
A dictionary that maps namespaces that contain overridable functions
to functions in that namespace that can be overridden.
"""
return _get_overridable_functions()[0]
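# Illustrative sketch (added for clarity; not part of the original module): the
# returned mapping is keyed by namespace, e.g.
#   funcs = get_overridable_functions()
#   torch.nn.functional.relu in funcs[torch.nn.functional]  # expected to be True
#   torch.add in funcs[torch]                               # expected to be True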
def resolve_name(f):
"""Get a human readable string name for a function passed to
__torch_function__
Arguments
---------
callable : Callable
Function to resolve the name of.
Returns
-------
str
Name of the function; if eval'ed it should give back the input
function.
"""
if isinstance(f, torch._ops.OpOverload):
return str(f)
return _get_overridable_functions()[1].get(f)
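# Illustrative sketch (added for clarity; not part of the original module):
#   resolve_name(torch.add)         # -> 'torch.add'
#   resolve_name(torch.Tensor.add)  # -> 'torch.Tensor.add'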
@functools.lru_cache(None)
def _get_tensor_methods() -> Set[Callable]:
""" Returns a set of the overridable methods on ``torch.Tensor`` """
overridable_funcs = get_overridable_functions()
methods = set(overridable_funcs[torch.Tensor])
return methods
def is_tensor_method_or_property(func: Callable) -> bool:
"""
Returns True if the function passed in is a handler for a
method or property belonging to ``torch.Tensor``, as passed
into ``__torch_function__``.
.. note::
For properties, their ``__get__`` method must be passed in.
This may be needed, in particular, for the following reasons:
1. Methods/properties sometimes don't contain a `__module__` slot.
2. They require that the first passed-in argument is an instance
of ``torch.Tensor``.
Examples
--------
>>> is_tensor_method_or_property(torch.Tensor.add)
True
>>> is_tensor_method_or_property(torch.add)
False
"""
return func in _get_tensor_methods() or func.__name__ == "__get__"
def is_tensor_like(inp):
"""
Returns ``True`` if the passed-in input is a Tensor-like.
Currently, this occurs whenever there's a ``__torch_function__``
attribute on the type of the input.
Examples
--------
A subclass of tensor is generally a Tensor-like.
>>> class SubTensor(torch.Tensor): ...
>>> is_tensor_like(SubTensor([0]))
True
Built-in or user types aren't usually Tensor-like.
>>> is_tensor_like(6)
False
>>> is_tensor_like(None)
False
>>> class NotATensor: ...
>>> is_tensor_like(NotATensor())
False
But, they can be made Tensor-like by implementing __torch_function__.
>>> class TensorLike:
... @classmethod
... def __torch_function__(cls, func, types, args, kwargs):
... return -1
>>> is_tensor_like(TensorLike())
True
"""
return type(inp) is torch.Tensor or hasattr(type(inp), "__torch_function__")
def _wrap_torch_function(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if isinstance(f, classmethod):
raise RuntimeError("TorchFunctionMode's torch_function function " +
"should be a normal method not a class method")
inner = getattr(self, "inner", None)
with enable_torch_function_mode(inner):
return f(self, *args, **kwargs)
return wrapped
# Implementation note: I had a choice about how much of mode stacks
# to implement in Python versus in C++. At time of writing, I did not care
# too much about implementation efficiency; however, I do care about making it
# hard for users to implement modes in the wrong way. In the end, it turned
# out to be possible to implement mode stacks entirely from userland, with the
# C++ API providing only _get_torch_function_mode() and
# _set_torch_function_mode(), so I opted to provide some unsafe C++ bindings and
# have the bulk of the logic for managing the stack in Python, which helped
# simplify the C++ API surface. It would also have been valid to build in the
# notion of mode stack directly into C++ but in this design it's substantially
# more difficult to interact with TorchFunctionModeMeta.
class TorchFunctionModeMeta(type):
"""
Metaclass for :class:`TorchFunctionMode`; it does two things:
* Adds an implicit ``inner`` kwarg to ``__init__``, to
allow the modes to be chained together to form a stack.
* Reenables the inner mode, so that by default PyTorch API calls
will compositionally proceed to the next mode on the stack.
The default behavior for the second bullet is important, as it is easy to
accidentally write ``__torch_function__`` implementations that are not
compositional, and the wrapping here makes the obvious code do the
right thing (aka, this is why there is a metaclass).
"""
def __new__(metacls, name, bases, dct):
if '__init__' in dct:
dct['__init__'] = _wrap_init(dct['__init__'])
if '__torch_function__' in dct:
dct['__torch_function__'] = _wrap_torch_function(dct['__torch_function__'])
return super().__new__(metacls, name, bases, dct)
class TorchFunctionMode(metaclass=TorchFunctionModeMeta):
"""
A ``TorchFunctionMode`` allows you to override the meaning of all
``__torch_function__`` overrideable functions within a dynamic scope,
without having to actually create a tensor subclass or manually
monkey-patch functions in the PyTorch API. Some common situations
where you should use a mode:
* You want to override the meaning of factory functions, or other
functions that do not otherwise take a tensor as an argument
(these cannot be overridden with tensor subclasses).
* You want to override the behavior of all functions without needing
to wrap your inputs in tensor subclasses; e.g., if you are just
interested in logging intermediate computations.
* You want to control the order of execution of various tensor
subclasses explicitly, rather than implicitly via the return of
``NotImplemented``.
Independent subclasses of :class:`TorchFunctionMode` are compositional:
modes can be pushed onto a stack using ``with MyMode():``.
When you call functions in the PyTorch API inside your
``__torch_function__`` implementation, by default, they will forward on to
the next mode on the mode stack. If you want to recursively call back into
your current ``__torch_function__`` implementation, either explicitly
invoke ``self.__torch_function__(...)``, or use the context manager
``enable_torch_function_mode(self, replace=self.inner)`` to make PyTorch
API self-referential (beware of infinite loops, in this case!)
"""
inner: "TorchFunctionMode"
# Force metaclass to generate constructor at the base of the hierarchy
def __init__(self):
pass
def __torch_function__(self, func, types, args=(), kwargs=None):
raise NotImplementedError()
def __enter__(self):
old = _get_torch_function_mode()
if hasattr(self, "inner"):
raise RuntimeError(f"{self} has already been used as a mode. Please use a fresh version or use restore")
else:
self.inner = old
if old is None:
self.ancestors = set()
else:
self.ancestors = self.inner.ancestors.union({self.inner})
_set_torch_function_mode(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
_set_torch_function_mode(self.inner)
@contextlib.contextmanager
def restore(self):
return _restore_mode(self, mode_info=_TorchFunctionModeInfo())
@classmethod
def push(cls, *args, **kwargs):
warnings.warn("`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`")
instance = cls(*args, **kwargs)
return instance
class BaseTorchFunctionMode(TorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
return func(*args, **kwargs)
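# Illustrative sketch (added for clarity; not part of the original module): a minimal
# logging mode built on ``TorchFunctionMode``. The class name is hypothetical.
class _ExampleLoggingMode(TorchFunctionMode):
    def __torch_function__(self, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        print(f"calling {resolve_name(func) or func}")
        # The metaclass re-enables the inner mode around this handler, so the call
        # below forwards to the next mode on the stack (or to the normal implementation).
        return func(*args, **kwargs)
# Usage sketch:
#   with _ExampleLoggingMode():
#       torch.ones(2) + 1  # logs both the factory call and the addition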
# This is private API as I'm not sure it's possible for users to use this
# compositionally (easy to discard too many modes). It is useful for
# library code though, e.g., in handle_torch_function
@contextlib.contextmanager
def _no_torch_function_mode() -> Iterator[None]:
old = _get_torch_function_mode()
_set_torch_function_mode(None)
try:
yield
finally:
_set_torch_function_mode(old)
class _TorchFunctionModeInfo(_ModeInfo):
def __init__(self):
super().__init__(mode_name="torch_function", mode_class=TorchFunctionMode)
def get_mode(self):
return _get_torch_function_mode()
def set_mode(self, mode):
return _set_torch_function_mode(mode)
@contextlib.contextmanager
def enable_torch_function_mode(mode, *, replace=None, ignore_preexisting=False) -> Iterator[None]:
"""
Context manager that sets the current :class:`TorchFunctionMode`; see the
class for more information on what modes are. This function is
non-compositional; if there is already an existing mode, it will raise an
error; prefer using ``with MyMode():`` if your ``__torch_function__``
implementation can defer to an inner mode.
This function is safe to use inside a ``__torch_function__`` mode handler,
as the mode is guaranteed to be disabled in this context. You can use
this context manager to reinstate the mode so that calls to overridable
APIs recursively call back into your mode handler (this can easily cause
infinite loops, so use with care!)
Args:
mode (:class:`TorchFunctionMode`, Tensor-like class or None): the
mode to set as current mode. If you pass a Tensor-like class,
it will be treated as a non-compositional mode with no state,
which is convenient if you have an existing tensor subclass
that you'd like to apply globally in a quick and dirty way.
Passing None will disable the current mode.
replace (:class:`TorchFunctionMode` or Tensor-like class): the
mode to replace. You can use this argument to change the mode in
a situation where you know what the current mode is (and you are
intentionally overwriting it.) If you don't know what the current
mode is, use ``ignore_preexisting`` instead.
ignore_preexisting (bool): if True, ignore any preexisting mode
and overwrite it with the passed mode.
"""
return _enable_mode(mode, _TorchFunctionModeInfo(), replace=replace, ignore_preexisting=ignore_preexisting)
class enable_reentrant_dispatch():
def __enter__(self):
self._raii_guard = torch._C._RestorePythonTLSSnapshot()
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
del self._raii_guard
def get_buffer(tensor_subclass, data, prefix):
import ctypes
assert prefix in {"stride", "size", "sym_size"}
buffer_name = f"_{prefix}_buffer"
if not hasattr(tensor_subclass, buffer_name):
SizeType = ctypes.c_longlong * len(data)
setattr(tensor_subclass, buffer_name, SizeType(*data))
ptr = ctypes.addressof(getattr(tensor_subclass, buffer_name))
return (ptr, len(data))
|
pytorch-master
|
torch/overrides.py
|
from collections import OrderedDict
"""
This file contains helper functions that implement experimental functionality
for named tensors in python. All of these are experimental, unstable, and
subject to change or deletion.
"""
def check_serializing_named_tensor(tensor):
if tensor.has_names():
raise RuntimeError(
"NYI: Named tensors don't support serialization. Please drop "
"names via `tensor = tensor.rename(None)` before serialization."
)
def build_dim_map(tensor):
"""Returns a map of { dim: dim_name } where dim is a name if the dim is named
and the dim index otherwise."""
return OrderedDict(
[(idx if name is None else name, name) for idx, name in enumerate(tensor.names)]
)
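# Illustrative check (added for clarity; not part of the original module): for a
# tensor with names (None, 'C') this returns OrderedDict([(0, None), ('C', 'C')]),
# i.e. unnamed dims are keyed by their index and named dims by their name.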
def unzip_namedshape(namedshape):
if isinstance(namedshape, OrderedDict):
namedshape = namedshape.items()
if not hasattr(namedshape, "__iter__") and not isinstance(namedshape, tuple):
raise RuntimeError(
"Expected namedshape to be OrderedDict or iterable of tuples, got: {}".format(
type(namedshape)
)
)
if len(namedshape) == 0:
raise RuntimeError("Expected namedshape to non-empty.")
return zip(*namedshape)
def namer_api_name(inplace):
if inplace:
return "rename_"
else:
return "rename"
def is_ellipsis(item):
return item == Ellipsis or item == "..."
def single_ellipsis_index(names, fn_name):
ellipsis_indices = [i for i, name in enumerate(names) if is_ellipsis(name)]
if len(ellipsis_indices) >= 2:
raise RuntimeError(
"{}: More than one Ellipsis ('...') found in names ("
"{}). This function supports up to one Ellipsis.".format(fn_name, names)
)
if len(ellipsis_indices) == 1:
return ellipsis_indices[0]
return None
def expand_single_ellipsis(numel_pre_glob, numel_post_glob, names):
return names[numel_pre_glob : len(names) - numel_post_glob]
def replace_ellipsis_by_position(ellipsis_idx, names, tensor_names):
globbed_names = expand_single_ellipsis(
ellipsis_idx, len(names) - ellipsis_idx - 1, tensor_names
)
return names[:ellipsis_idx] + globbed_names + names[ellipsis_idx + 1 :]
def resolve_ellipsis(names, tensor_names, fn_name):
"""
Expands ... inside `names` to be equal to a list of names from `tensor_names`.
"""
ellipsis_idx = single_ellipsis_index(names, fn_name)
if ellipsis_idx is None:
return names
return replace_ellipsis_by_position(ellipsis_idx, names, tensor_names)
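# Illustrative check (added for clarity; not part of the original module): with
# tensor names ('N', 'C', 'H', 'W'),
#   resolve_ellipsis(('...', 'height', 'width'), ('N', 'C', 'H', 'W'), 'rename')
# expands the Ellipsis to the leading names and returns
#   ('N', 'C', 'height', 'width').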
def update_names_with_list(tensor, names, inplace):
# Special case for tensor.rename(None)
if len(names) == 1 and names[0] is None:
return tensor._update_names(None, inplace)
return tensor._update_names(
resolve_ellipsis(names, tensor.names, namer_api_name(inplace)), inplace
)
def update_names_with_mapping(tensor, rename_map, inplace):
dim_map = build_dim_map(tensor)
for old_dim in rename_map.keys():
new_dim = rename_map[old_dim]
if old_dim in dim_map.keys():
dim_map[old_dim] = new_dim
else:
raise RuntimeError(
(
"{api_name}: Tried to rename dim '{old_dim}' to dim "
"{new_dim} in Tensor[{dims}] but dim '{old_dim}' does not exist"
).format(
old_dim=old_dim,
new_dim=new_dim,
dims=tensor.names,
api_name=namer_api_name(inplace),
)
)
return tensor._update_names(tuple(dim_map.values()), inplace)
def update_names(tensor, names, rename_map, inplace):
"""There are two usages:
tensor.rename(*names) returns a view on tensor with named dims `names`.
`names` must be of length `tensor.dim()`; otherwise, if '...' is in `names`,
then it is expanded greedily to be equal to the corresponding names from
`tensor.names`.
For example,
```
>>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
>>> x.rename('...', 'height', 'width').names
('N', 'C', 'height', 'width')
>>> x.rename('batch', '...', 'width').names
('batch', 'C', 'H', 'width')
```
tensor.rename(**rename_map) returns a view on tensor that has rename dims
as specified in the mapping `rename_map`.
For example,
```
>>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
>>> x.rename(W='width', H='height').names
('N', 'C', 'height', 'width')
```
Finally, tensor.rename has an in-place version called tensor.rename_.
"""
has_names = len(names) > 0
has_rename_pairs = bool(rename_map)
if has_names and has_rename_pairs:
raise RuntimeError(
"{api_name}: This function takes either positional "
"args or keyword args, but not both. Use tensor.{api_name}(*names) "
"to name dims and tensor.{api_name}(**rename_map) to rename "
"dims.".format(api_name=namer_api_name(inplace))
)
# Special case for tensor.rename(*[]), which is valid for a 0 dim tensor.
if not has_names and not has_rename_pairs:
return update_names_with_list(tensor, names, inplace)
if has_names:
return update_names_with_list(tensor, names, inplace)
return update_names_with_mapping(tensor, rename_map, inplace)
|
pytorch-master
|
torch/_namedtensor_internals.py
|
import torch
from typing import Any, List, Sequence, Tuple, Union
import builtins
# Convenience aliases for common composite types that we need
# to talk about in PyTorch
_TensorOrTensors = Union[torch.Tensor, Sequence[torch.Tensor]]
# In some cases, these basic types are shadowed by corresponding
# top-level values. The underscore variants let us refer to these
# types. See https://github.com/python/mypy/issues/4146 for why these
# workarounds are necessary
_int = builtins.int
_float = builtins.float
_bool = builtins.bool
_dtype = torch.dtype
_device = torch.device
_qscheme = torch.qscheme
_size = Union[torch.Size, List[_int], Tuple[_int, ...]]
_layout = torch.layout
class SymInt:
pass
# Meta-type for "numeric" things; matches our docs
Number = Union[builtins.int, builtins.float, builtins.bool]
# Meta-type for "device-like" things. Not to be confused with 'device' (a
# literal device object). This nomenclature is consistent with PythonArgParser.
# None means use the default device (typically CPU)
Device = Union[_device, str, None]
# Storage protocol implemented by ${Type}StorageBase classes
class Storage(object):
_cdata: int
device: torch.device
dtype: torch.dtype
_torch_load_uninitialized: bool
def __deepcopy__(self, memo) -> 'Storage':
...
def _new_shared(self, int) -> 'Storage':
...
def _write_file(self, f: Any, is_real_file: _bool, save_size: _bool, element_size: int) -> None:
...
def element_size(self) -> int:
...
def is_shared(self) -> bool:
...
def share_memory_(self) -> 'Storage':
...
def nbytes(self) -> int:
...
def cpu(self) -> 'Storage':
...
def data_ptr(self) -> int:
...
def from_file(self, filename: str, shared: bool = False, nbytes: int = 0) -> 'Storage':
...
def _new_with_file(self, f: Any, element_size: int) -> 'Storage':
...
...
|
pytorch-master
|
torch/types.py
|
"""Various linear algebra utility methods for internal use.
"""
from typing import Optional, Tuple
import torch
from torch import Tensor
def is_sparse(A):
"""Check if tensor A is a sparse tensor"""
if isinstance(A, torch.Tensor):
return A.layout == torch.sparse_coo
error_str = "expected Tensor"
if not torch.jit.is_scripting():
error_str += " but got {}".format(type(A))
raise TypeError(error_str)
def get_floating_dtype(A):
"""Return the floating point dtype of tensor A.
Integer types map to float32.
"""
dtype = A.dtype
if dtype in (torch.float16, torch.float32, torch.float64):
return dtype
return torch.float32
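# Editor's illustrative sketch: integer dtypes are promoted to float32, while
# floating dtypes pass through unchanged.
# >>> get_floating_dtype(torch.zeros(2, dtype=torch.int64))
# torch.float32
# >>> get_floating_dtype(torch.zeros(2, dtype=torch.float64))
# torch.float64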
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
"""Multiply two matrices.
If A is None, return B. A can be sparse or dense. B is always
dense.
"""
if A is None:
return B
if is_sparse(A):
return torch.sparse.mm(A, B)
return torch.matmul(A, B)
def conjugate(A):
"""Return conjugate of tensor A.
.. note:: If A's dtype is not complex, A is returned.
"""
if A.is_complex():
return A.conj()
return A
def transpose(A):
"""Return transpose of a matrix or batches of matrices."""
ndim = len(A.shape)
return A.transpose(ndim - 1, ndim - 2)
def transjugate(A):
"""Return transpose conjugate of a matrix or batches of matrices."""
return conjugate(transpose(A))
def bform(X: Tensor, A: Optional[Tensor], Y: Tensor) -> Tensor:
"""Return bilinear form of matrices: :math:`X^T A Y`."""
return matmul(transpose(X), matmul(A, Y))
def qform(A: Optional[Tensor], S: Tensor):
"""Return quadratic form :math:`S^T A S`."""
return bform(S, A, S)
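# Editor's illustrative sketch: qform(A, S) computes S^T A S; with S equal to the
# identity it simply returns A. The matrix below is arbitrary.
# >>> A = torch.eye(3) * 2
# >>> torch.allclose(qform(A, torch.eye(3)), A)
# True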
def basis(A):
"""Return orthogonal basis of A columns."""
if A.is_cuda:
# torch.orgqr is not available in CUDA
Q = torch.linalg.qr(A).Q
else:
Q = torch.orgqr(*torch.geqrf(A))
return Q
def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:
"""Return eigenpairs of A with specified ordering."""
if largest is None:
largest = False
E, Z = torch.linalg.eigh(A, UPLO="U")
# assuming that E is ordered
if largest:
E = torch.flip(E, dims=(-1,))
Z = torch.flip(Z, dims=(-1,))
return E, Z
# This function was deprecated and removed
# This nice error message can be removed in version 1.13+
def solve(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
raise RuntimeError(
"This function was deprecated since version 1.9 and is now removed. Please use the `torch.linalg.solve` function instead.",
)
|
pytorch-master
|
torch/_linalg_utils.py
|
import torch
def show():
"""
Return a human-readable string with descriptions of the
configuration of PyTorch.
"""
return torch._C._show_config()
# TODO: In principle, we could provide more structured version/config
# information here. For now only CXX_FLAGS is exposed, as Timer
# uses them.
def _cxx_flags():
"""Returns the CXX_FLAGS used when building PyTorch."""
return torch._C._cxx_flags()
def parallel_info():
r"""Returns detailed string with parallelization settings"""
return torch._C._parallel_info()
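# Editor's illustrative usage sketch: both helpers just return human-readable strings.
# >>> import torch.__config__
# >>> print(torch.__config__.show())           # build configuration summary
# >>> print(torch.__config__.parallel_info())  # OpenMP / MKL threading details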
|
pytorch-master
|
torch/__config__.py
|
import copyreg
import enum
import functools
import warnings
from collections import OrderedDict
from copy import deepcopy
from numbers import Number
from typing import Any, Dict, Optional, Tuple, Union
import torch
import torch._C as _C
import torch.utils.hooks as hooks
from torch._namedtensor_internals import (
check_serializing_named_tensor,
is_ellipsis,
resolve_ellipsis,
single_ellipsis_index,
unzip_namedshape,
update_names,
)
from torch.overrides import (
get_default_nowrap_functions,
handle_torch_function,
has_torch_function,
has_torch_function_unary,
has_torch_function_variadic,
)
def _handle_torch_function_and_wrap_type_error_to_not_implemented(f):
# functools.wraps doesn't work well with methods in python 2
method_assignments = ("__name__", "__doc__")
assigned = functools.WRAPPER_ASSIGNMENTS
@functools.wraps(f, assigned=assigned)
def wrapped(*args, **kwargs):
try:
# See https://github.com/pytorch/pytorch/issues/75462
if has_torch_function(args):
return handle_torch_function(wrapped, args, *args, **kwargs)
return f(*args, **kwargs)
except TypeError:
return NotImplemented
return wrapped
# Should not be used, this is kept only for BC of loading old serialized Tensor subclasses
def _rebuild_from_type(func, type, args, dict):
if type is Tensor:
return func(*args)
ret = func(*args).as_subclass(type)
ret.__dict__ = dict
return ret
def _rebuild_from_type_v2(func, new_type, args, state):
if new_type is Tensor:
return func(*args)
ret = func(*args)
if type(ret) is not new_type:
ret = ret.as_subclass(new_type)
# Tensor does define __setstate__ even though it doesn't define
# __getstate__. So only use __setstate__ if it is NOT the one defined
# on Tensor
if (
getattr(ret.__class__, "__setstate__", Tensor.__setstate__)
is not Tensor.__setstate__
):
ret.__setstate__(state)
else:
if isinstance(state, tuple):
if not len(state) == 2:
raise RuntimeError(f"Invalid serialized state: {state}")
dict_state = state[0]
slots_state = state[1]
else:
dict_state = state
slots_state = None
for k, v in dict_state.items():
setattr(ret, k, v)
if slots_state:
for k, v in slots_state.items():
setattr(ret, k, v)
return ret
# NB: If you subclass Tensor, and want to share the subclassed class
# across processes, you must also update torch/multiprocessing/reductions.py
# to define a ForkingPickler serialization mode for the class.
#
# NB: If you add a new method to Tensor, you must update
# torch/__init__.py.in to add a type annotation for your method;
# otherwise, it will not show up in autocomplete.
class Tensor(torch._C._TensorBase):
def __deepcopy__(self, memo):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__deepcopy__, (self,), self, memo)
if not self.is_leaf:
raise RuntimeError(
"Only Tensors created explicitly by the user "
"(graph leaves) support the deepcopy protocol at the moment"
)
if id(self) in memo:
return memo[id(self)]
with torch.no_grad():
# TODO: skipping storage copy is wrong for meta, as meta
# does accurate alias tracking; however, the code below
# doesn't work because of
# https://github.com/pytorch/pytorch/issues/47442
# Update the test in test_serialization if you remove 'meta' from here
if (
self.is_sparse
or self.device.type in ["lazy", "xla", "mps", "ort", "meta", "hpu"]
or (type(self) is not Tensor and self.data_ptr() == 0)
):
new_tensor = self.clone()
if type(new_tensor) is not type(self):
raise RuntimeError(
"The default implementation of __deepcopy__() for wrapper subclasses "
"only works for subclass types that implement clone() and for which "
"cloning returns another instance of the same subclass. You should either "
"properly implement clone() for your subclass or override __deepcopy__() "
"if it is intended behavior for clone() to return an instance of a "
"different type."
)
else:
new_storage = self.storage().__deepcopy__(memo)
if self.is_quantized:
# quantizer_params can be different type based on torch attribute
quantizer_params: Union[
Tuple[torch.qscheme, float, int],
Tuple[torch.qscheme, Tensor, Tensor, int],
]
if self.qscheme() == torch.per_tensor_affine:
quantizer_params = (
self.qscheme(),
self.q_scale(),
self.q_zero_point(),
)
elif self.qscheme() in (
torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
):
quantizer_params = (
self.qscheme(),
self.q_per_channel_scales(),
self.q_per_channel_zero_points(),
self.q_per_channel_axis(),
)
else:
raise RuntimeError(
f"Unsupported qscheme {self.qscheme()} in deepcopy"
)
# TODO: Once we decide to break serialization FC, no longer
# need to wrap with TypedStorage
new_tensor = torch._utils._rebuild_qtensor(
torch.storage.TypedStorage(
wrap_storage=new_storage.untyped(), dtype=self.dtype
),
self.storage_offset(),
self.size(),
self.stride(),
quantizer_params,
self.requires_grad,
self._backward_hooks,
)
if type(new_tensor) is not type(self):
raise RuntimeError(
"The default implementation of __deepcopy__() for quantized tensors "
"expects the tensor returned by torch._utils._rebuild_qtensor() to "
"match the type of the instance being copied. If you encounter this, "
"please open an issue on PyTorch's GitHub."
)
else:
new_tensor = self.new_empty([])
if type(new_tensor) is not type(self):
raise RuntimeError(
"The default implementation of __deepcopy__() for non-wrapper subclasses "
"only works for subclass types that implement new_empty() and for which "
"that function returns another instance of the same subclass. You should "
"either properly implement new_empty() for your subclass or override "
"__deepcopy__() if it is intended behavior for new_empty() to return "
"an instance of a different type."
)
new_tensor.set_(
new_storage, self.storage_offset(), self.size(), self.stride()
)
if self.is_conj():
new_tensor = new_tensor.conj_physical()
if self.is_neg():
new_tensor = new_tensor.neg()
if self.requires_grad:
new_tensor.requires_grad_()
if self.grad is not None:
new_tensor.grad = self.grad.__deepcopy__(memo)
if not type(self) is Tensor:
if type(new_tensor) is not type(self):
raise RuntimeError(
"Type of deepcopy result does not match the type of the source tensor. "
"If you encounter this, please open an issue on PyTorch's GitHub."
)
# Plain Tensors don't have slots
slots_to_save = copyreg._slotnames(self.__class__) # type: ignore[attr-defined]
for slot in slots_to_save:
if hasattr(self, slot):
setattr(new_tensor, slot, deepcopy(getattr(self, slot), memo))
new_tensor.__dict__ = deepcopy(self.__dict__, memo)
memo[id(self)] = new_tensor
return new_tensor
def __reduce_ex__(self, proto):
if type(self) is Tensor:
return self._reduce_ex_internal(proto)
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__reduce_ex__, (self,), self, proto)
func, args = self._reduce_ex_internal(proto)
# Get the state of the python subclass
# This loosely mimics the function on the object class, but since Tensor does not inherit
# from it, we cannot call that function directly
# https://github.com/python/cpython/blob/c83919bd635f4433f1c6ae8504996a9fe3c215e5/Objects/typeobject.c#L4891
getstate_fn = getattr(self, "__getstate__", None)
if getstate_fn:
state = getstate_fn()
else:
slots_to_save = copyreg._slotnames(self.__class__) # type: ignore[attr-defined]
if slots_to_save:
state = (
self.__dict__,
{
name: getattr(self, name)
for name in slots_to_save
if hasattr(self, name)
},
)
else:
state = self.__dict__
return (_rebuild_from_type_v2, (func, type(self), args, state))
def storage(self):
r"""
storage() -> torch.Storage
Returns the underlying storage.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.storage, (self,), self)
return torch.TypedStorage(wrap_storage=self._storage(), dtype=self.dtype)
def _reduce_ex_internal(self, proto):
check_serializing_named_tensor(self)
# See Note [Don't serialize hooks]
torch.utils.hooks.warn_if_has_hooks(self)
backward_hooks: Dict[Any, Any] = OrderedDict()
# Note: Numpy array is chosen to be the rebuild component for XLA, ORT Tensors.
# We considered a few options:
# 1. CPU tensor can't be used here.
# Otherwise in torch.load CPU storage is reconstructed with randomly
# initialized data, moved onto backend device, and then storage is updated
# to the serialized content. This works perfectly for CPU/CUDA but not these backends;
# their tensors are disconnected from the storage so they don't get the update.
# 2. Python list is not a good fit due to performance reason.
# `tolist()` converts every single element in the tensor into python objects
# and serialize them one by one.
if self.device.type in ["xla", "ort", "hpu"]:
# Convert BFloat16 tensors to Float32 before conversion to numpy, as numpy doesn't
# support BFloat16. The rebuild from numpy takes in the original self.dtype,
# so the BFloat16 tensor is reconstructed on load.
numpy_tensor = (
self.cpu().numpy()
if self.dtype != torch.bfloat16
else self.cpu().to(torch.float32).numpy()
)
return (
torch._utils._rebuild_device_tensor_from_numpy,
(numpy_tensor, self.dtype, str(self.device), self.requires_grad),
)
if self.device.type == "meta":
# NB: This implementation BREAKS storage sharing. Current
# hypothesis is that no one cares for meta tensors.
arg_meta = (
self.dtype,
tuple(self.size()),
self.stride(),
self.requires_grad,
)
return (torch._utils._rebuild_meta_tensor_no_storage, arg_meta)
if self.is_quantized:
# quantizer_params can be different type based on torch attribute
quantizer_params: Union[
Tuple[torch.qscheme, float, int], Tuple[Any, Tensor, Tensor, int]
]
if self.qscheme() == torch.per_tensor_affine:
quantizer_params = (
torch.per_tensor_affine,
self.q_scale(),
self.q_zero_point(),
)
elif self.qscheme() in (
torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
):
# convert scales and zero points to tuple to avoid recursive calls
# when/if we get multi-axis quantized tensors in the future, the shape
# is recoverable from the main tensor shape
quantizer_params = (
torch.per_channel_affine,
self.q_per_channel_scales(),
self.q_per_channel_zero_points(),
self.q_per_channel_axis(),
)
else:
raise RuntimeError(
f"Serialization is not supported for tensors of type {self.qscheme()}"
)
# TODO: Once we decide to break serialization FC, no longer
# need to wrap with TypedStorage
args_qtensor = (
torch.storage.TypedStorage(
wrap_storage=self.storage().untyped(), dtype=self.dtype
),
self.storage_offset(),
tuple(self.size()),
self.stride(),
quantizer_params,
self.requires_grad,
backward_hooks,
)
return (torch._utils._rebuild_qtensor, args_qtensor)
elif self.is_sparse:
if self.layout == torch.sparse_coo:
args_sparse = (
self.layout,
(self._indices(), self._values(), self.size()),
)
else:
raise NotImplementedError(
"sparse tensor __reduce_ex__ for layout `%s`" % (self.layout)
)
return (torch._utils._rebuild_sparse_tensor, args_sparse)
elif self.is_sparse_csr:
if self.layout == torch.sparse_csr:
args_sparse_csr = (
self.layout,
(
self.crow_indices(),
self.col_indices(),
self.values(),
self.size(),
),
)
else:
raise NotImplementedError(
"sparse csr tensor __reduce_ex__ for layout `%s`" % (self.layout)
)
return (torch._utils._rebuild_sparse_csr_tensor, args_sparse_csr)
elif (
self.data_ptr() == 0
and type(self) is not torch.Tensor
and type(self).__torch_dispatch__ is not torch.Tensor.__torch_dispatch__
):
arg_wrapper_subclass = (
type(self),
self.dtype,
tuple(self.size()),
self.stride(),
self.storage_offset(),
self.layout,
self.device,
self.requires_grad,
)
return (torch._utils._rebuild_wrapper_subclass, arg_wrapper_subclass)
else:
# TODO: Once we decide to break serialization FC, no longer
# need to wrap with TypedStorage
args = (
torch.storage.TypedStorage(
wrap_storage=self.storage().untyped(), dtype=self.dtype
),
self.storage_offset(),
tuple(self.size()),
self.stride(),
self.requires_grad,
backward_hooks,
) # previously was self._backward_hooks
return (torch._utils._rebuild_tensor_v2, args)
def __setstate__(self, state):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__setstate__, (self,), self, state)
# Warning: this method is NOT called when you torch.load() a tensor;
# that is managed by _rebuild_tensor_v2
if not self.is_leaf:
raise RuntimeError("__setstate__ can be only called on leaf Tensors")
if len(state) == 4:
# legacy serialization of Tensor
self.set_(*state)
return
elif len(state) == 5:
# legacy serialization of Variable
self.data = state[0]
state = (state[3], state[4], state[2])
# The setting of _backward_hooks is expected to be a no-op.
# See Note [Don't serialize hooks]
self.requires_grad, _, self._backward_hooks = state
def __repr__(self, *, tensor_contents=None):
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.__repr__, (self,), self, tensor_contents=tensor_contents
)
# All strings are unicode in Python 3.
return torch._tensor_str._str(self, tensor_contents=tensor_contents)
def backward(
self, gradient=None, retain_graph=None, create_graph=False, inputs=None
):
r"""Computes the gradient of current tensor w.r.t. graph leaves.
The graph is differentiated using the chain rule. If the tensor is
non-scalar (i.e. its data has more than one element) and requires
gradient, the function additionally requires specifying ``gradient``.
It should be a tensor of matching type and location, that contains
the gradient of the differentiated function w.r.t. ``self``.
This function accumulates gradients in the leaves - you might need to zero
``.grad`` attributes or set them to ``None`` before calling it.
See :ref:`Default gradient layouts<default-grad-layouts>`
for details on the memory layout of accumulated gradients.
.. note::
If you run any forward ops, create ``gradient``, and/or call ``backward``
in a user-specified CUDA stream context, see
:ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.
.. note::
When ``inputs`` are provided and a given input is not a leaf,
the current implementation will call its grad_fn (though it is not strictly needed to get these gradients).
It is an implementation detail on which the user should not rely.
See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
Args:
gradient (Tensor or None): Gradient w.r.t. the
tensor. If it is a tensor, it will be automatically converted
to a Tensor that does not require grad unless ``create_graph`` is True.
None values can be specified for scalar Tensors or ones that
don't require grad. If a None value would be acceptable then
this argument is optional.
retain_graph (bool, optional): If ``False``, the graph used to compute
the grads will be freed. Note that in nearly all cases setting
this option to True is not needed and often can be worked around
in a much more efficient way. Defaults to the value of
``create_graph``.
create_graph (bool, optional): If ``True``, graph of the derivative will
be constructed, allowing to compute higher order derivative
products. Defaults to ``False``.
inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be
accumulated into ``.grad``. All other Tensors will be ignored. If not
provided, the gradient is accumulated into all the leaf Tensors that were
used to compute the :attr:`tensors`.
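Example (an illustrative sketch added by the editor; a minimal scalar loss)::
    >>> x = torch.tensor([1., 2., 3.], requires_grad=True)
    >>> (x * x).sum().backward()
    >>> x.grad
    tensor([2., 4., 6.])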
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.backward,
(self,),
self,
gradient=gradient,
retain_graph=retain_graph,
create_graph=create_graph,
inputs=inputs,
)
torch.autograd.backward(
self, gradient, retain_graph, create_graph, inputs=inputs
)
def register_hook(self, hook):
r"""Registers a backward hook.
The hook will be called every time a gradient with respect to the
Tensor is computed. The hook should have the following signature::
hook(grad) -> Tensor or None
The hook should not modify its argument, but it can optionally return
a new gradient which will be used in place of :attr:`grad`.
This function returns a handle with a method ``handle.remove()``
that removes the hook from the module.
Example::
>>> v = torch.tensor([0., 0., 0.], requires_grad=True)
>>> h = v.register_hook(lambda grad: grad * 2) # double the gradient
>>> v.backward(torch.tensor([1., 2., 3.]))
>>> v.grad
tensor([2., 4., 6.])
>>> h.remove() # removes the hook
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.register_hook, (self,), self, hook)
if not self.requires_grad:
raise RuntimeError(
"cannot register a hook on a tensor that " "doesn't require gradient"
)
if self._backward_hooks is None:
self._backward_hooks = OrderedDict()
if self.grad_fn is not None:
self.grad_fn._register_hook_dict(self)
handle = hooks.RemovableHandle(self._backward_hooks)
self._backward_hooks[handle.id] = hook
return handle
def reinforce(self, reward):
def trim(str):
return "\n".join([line.strip() for line in str.split("\n")])
raise RuntimeError(
trim(
r"""reinforce() was removed.
Use torch.distributions instead.
See https://pytorch.org/docs/master/distributions.html
Instead of:
probs = policy_network(state)
action = probs.multinomial()
next_state, reward = env.step(action)
action.reinforce(reward)
action.backward()
Use:
probs = policy_network(state)
# NOTE: categorical is equivalent to what used to be called multinomial
m = torch.distributions.Categorical(probs)
action = m.sample()
next_state, reward = env.step(action)
loss = -m.log_prob(action) * reward
loss.backward()
"""
)
)
detach = _C._add_docstr(
_C._TensorBase.detach,
r"""
Returns a new Tensor, detached from the current graph.
The result will never require gradient.
This method also affects forward mode AD gradients and the result will never
have forward mode AD gradients.
.. note::
Returned Tensor shares the same storage with the original one.
In-place modifications on either of them will be seen, and may trigger
errors in correctness checks.
IMPORTANT NOTE: Previously, in-place size / stride / storage changes
(such as `resize_` / `resize_as_` / `set_` / `transpose_`) to the returned tensor
also updated the original tensor. Now, these in-place changes will not update the
original tensor anymore, and will instead trigger an error.
For sparse tensors:
In-place indices / values changes (such as `zero_` / `copy_` / `add_`) to the
returned tensor will not update the original tensor anymore, and will instead
trigger an error.
""",
)
detach_ = _C._add_docstr(
_C._TensorBase.detach_,
r"""
Detaches the Tensor from the graph that created it, making it a leaf.
Views cannot be detached in-place.
This method also affects forward mode AD gradients and the result will never
have forward mode AD gradients.
""",
)
def is_shared(self):
r"""Checks if tensor is in shared memory.
This is always ``True`` for CUDA tensors.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.is_shared, (self,), self)
return self.storage().is_shared()
def share_memory_(self):
r"""Moves the underlying storage to shared memory.
This is a no-op if the underlying storage is already in shared memory
and for CUDA tensors. Tensors in shared memory cannot be resized.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.share_memory_, (self,), self)
self.storage().share_memory_()
return self
def __reversed__(self):
r"""Reverses the tensor along dimension 0."""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__reversed__, (self,), self)
if self.dim() == 0:
return self
else:
return self.flip(0)
def norm(self, p="fro", dim=None, keepdim=False, dtype=None):
r"""See :func:`torch.norm`"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.norm, (self,), self, p=p, dim=dim, keepdim=keepdim, dtype=dtype
)
return torch.norm(self, p, dim, keepdim, dtype=dtype)
def solve(self, other):
from ._linalg_utils import solve
return solve(self, other)
def lu(self, pivot=True, get_infos=False):
r"""See :func:`torch.lu`"""
# If get_infos is True, then we don't need to check for errors and vice versa
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.lu, (self,), self, pivot=pivot, get_infos=get_infos
)
LU, pivots, infos = torch._lu_with_info(
self, pivot=pivot, check_errors=(not get_infos)
)
if get_infos:
return LU, pivots, infos
else:
return LU, pivots
def stft(
self,
n_fft: int,
hop_length: Optional[int] = None,
win_length: Optional[int] = None,
window: "Optional[Tensor]" = None,
center: bool = True,
pad_mode: str = "reflect",
normalized: bool = False,
onesided: Optional[bool] = None,
return_complex: Optional[bool] = None,
):
r"""See :func:`torch.stft`
.. warning::
This function changed signature at version 0.4.1. Calling with
the previous signature may cause an error or return an incorrect result.
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.stft,
(self,),
self,
n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=normalized,
onesided=onesided,
return_complex=return_complex,
)
return torch.stft(
self,
n_fft,
hop_length,
win_length,
window,
center,
pad_mode,
normalized,
onesided,
return_complex=return_complex,
)
def istft(
self,
n_fft: int,
hop_length: Optional[int] = None,
win_length: Optional[int] = None,
window: "Optional[Tensor]" = None,
center: bool = True,
normalized: bool = False,
onesided: Optional[bool] = None,
length: Optional[int] = None,
return_complex: bool = False,
):
r"""See :func:`torch.istft`"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.istft,
(self,),
self,
n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
normalized=normalized,
onesided=onesided,
length=length,
return_complex=return_complex,
)
return torch.istft(
self,
n_fft,
hop_length,
win_length,
window,
center,
normalized,
onesided,
length,
return_complex=return_complex,
)
def resize(self, *sizes):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.resize, (self,), self, *sizes)
warnings.warn("non-inplace resize is deprecated")
from torch.autograd._functions import Resize
return Resize.apply(self, sizes)
def resize_as(self, tensor):
if has_torch_function_variadic(self, tensor):
return handle_torch_function(Tensor.resize_as, (self, tensor), self, tensor)
warnings.warn("non-inplace resize_as is deprecated")
from torch.autograd._functions import Resize
return Resize.apply(self, tensor.size())
def split(self, split_size, dim=0):
r"""See :func:`torch.split`"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.split, (self,), self, split_size, dim=dim
)
if isinstance(split_size, int):
return super(Tensor, self).split(split_size, dim)
elif isinstance(split_size, Tensor):
try:
split_size = int(split_size)
return super(Tensor, self).split(split_size, dim)
except ValueError:
return super(Tensor, self).split_with_sizes(split_size, dim)
else:
return super(Tensor, self).split_with_sizes(split_size, dim)
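# Editor's illustrative sketch: an int splits into equal-sized chunks, while a
# list of sizes dispatches to split_with_sizes.
# >>> x = torch.arange(6)
# >>> [t.tolist() for t in x.split(2)]
# [[0, 1], [2, 3], [4, 5]]
# >>> [t.tolist() for t in x.split([1, 5])]
# [[0], [1, 2, 3, 4, 5]]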
def unique(self, sorted=True, return_inverse=False, return_counts=False, dim=None):
r"""Returns the unique elements of the input tensor.
See :func:`torch.unique`
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.unique,
(self,),
self,
sorted=sorted,
return_inverse=return_inverse,
return_counts=return_counts,
dim=dim,
)
return torch.unique(
self,
sorted=sorted,
return_inverse=return_inverse,
return_counts=return_counts,
dim=dim,
)
def unique_consecutive(self, return_inverse=False, return_counts=False, dim=None):
r"""Eliminates all but the first element from every consecutive group of equivalent elements.
See :func:`torch.unique_consecutive`
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.unique_consecutive,
(self,),
self,
return_inverse=return_inverse,
return_counts=return_counts,
dim=dim,
)
return torch.unique_consecutive(
self, return_inverse=return_inverse, return_counts=return_counts, dim=dim
)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rsub__(self, other):
return _C._VariableFunctions.rsub(self, other)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rdiv__(self, other):
return self.reciprocal() * other
__rtruediv__ = __rdiv__
__itruediv__ = _C._TensorBase.__idiv__
__pow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
_C._TensorBase.pow
)
__ipow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
_C._TensorBase.pow_
)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rmod__(self, other):
return torch.remainder(other, self)
def __format__(self, format_spec):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__format__, (self,), self, format_spec)
if self.dim() == 0 and not self.is_meta and type(self) is Tensor:
return self.item().__format__(format_spec)
return object.__format__(self, format_spec)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rpow__(self, other):
dtype = torch.result_type(other, self)
return torch.tensor(other, dtype=dtype, device=self.device) ** self
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __floordiv__(self, other):
return torch.floor_divide(self, other)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rfloordiv__(self, other):
return torch.floor_divide(other, self)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rlshift__(self, other):
return torch.bitwise_left_shift(other, self)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rrshift__(self, other):
return torch.bitwise_right_shift(other, self)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rmatmul__(self, other):
return torch.matmul(other, self)
__pos__ = _C._TensorBase.positive
__neg__ = _C._TensorBase.neg
__abs__ = _C._TensorBase.abs
def __len__(self):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__len__, (self,), self)
if self.dim() == 0:
raise TypeError("len() of a 0-d tensor")
if torch._C._get_tracing_state():
warnings.warn(
"Using len to get tensor shape might cause the trace to be incorrect. "
"Recommended usage would be tensor.shape[0]. "
"Passing a tensor of different shape might lead to errors or silently give "
"incorrect results.",
category=torch.jit.TracerWarning,
stacklevel=2,
)
return self.shape[0]
def __iter__(self):
# NB: we use 'imap' and not 'map' here, so that in Python 2 we get a
# generator and don't eagerly perform all the indexes. This could
# save us work, and also helps keep trace ordering deterministic
# (e.g., if you zip(*hiddens), the eager map will force all the
# indexes of hiddens[0] before hiddens[1], while the generator
# map will interleave them.)
# NB: We have intentionally skipped __torch_function__ dispatch here.
# See gh-54457
if self.dim() == 0:
raise TypeError("iteration over a 0-d tensor")
if torch._C._get_tracing_state():
warnings.warn(
"Iterating over a tensor might cause the trace to be incorrect. "
"Passing a tensor of different shape won't change the number of "
"iterations executed (and might lead to errors or silently give "
"incorrect results).",
category=torch.jit.TracerWarning,
stacklevel=2,
)
return iter(self.unbind(0))
def __hash__(self):
# Do NOT handle __torch_function__ here as user's default
# implementation that handle most functions will most likely do it wrong.
# It can be easily overridden by defining this method on the user
# subclass if needed.
return id(self)
def __dir__(self):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__dir__, (self,), self)
tensor_methods = dir(self.__class__)
tensor_methods.remove("volatile") # deprecated
attrs = list(self.__dict__.keys())
keys = tensor_methods + attrs
# property only available on dense CUDA tensors
if (not self.is_cuda) or self.is_sparse:
keys.remove("__cuda_array_interface__")
return sorted(keys)
# Numpy array interface, to support `numpy.asarray(tensor) -> ndarray`
__array_priority__ = 1000 # prefer Tensor ops over numpy ones
def __array__(self, dtype=None):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__array__, (self,), self, dtype=dtype)
if dtype is None:
return self.numpy()
else:
return self.numpy().astype(dtype, copy=False)
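# Editor's illustrative sketch: numpy sees CPU tensors through __array__, e.g.
# >>> import numpy as np
# >>> np.asarray(torch.tensor([1., 2.]))
# array([1., 2.], dtype=float32)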
# Wrap Numpy array again in a suitable tensor when done, to support e.g.
# `numpy.sin(tensor) -> tensor` or `numpy.greater(tensor, 0) -> ByteTensor`
def __array_wrap__(self, array):
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.__array_wrap__, (self,), self, array=array
)
if array.dtype == bool:
# Workaround, torch has no built-in bool tensor
array = array.astype("uint8")
return torch.from_numpy(array)
def __contains__(self, element):
r"""Check if `element` is present in tensor
Args:
element (Tensor or scalar): element to be checked
for presence in current tensor"
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__contains__, (self,), self, element)
if isinstance(element, (torch.Tensor, Number)):
# type hint doesn't understand the __contains__ result array
return (element == self).any().item() # type: ignore[union-attr]
raise RuntimeError(
"Tensor.__contains__ only supports Tensor or scalar, but you passed in a %s."
% type(element)
)
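# Editor's illustrative sketch: membership broadcasts an equality comparison and
# reduces it with any().
# >>> 3 in torch.tensor([1, 2, 3])
# True
# >>> torch.tensor(4) in torch.tensor([1, 2, 3])
# False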
@property
def __cuda_array_interface__(self):
"""Array view description for cuda tensors.
See:
https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html
"""
if has_torch_function_unary(self):
# TODO mypy doesn't support @property, see: https://github.com/python/mypy/issues/6185
return handle_torch_function(Tensor.__cuda_array_interface__.__get__, (self,), self) # type: ignore[attr-defined]
# raise AttributeError for unsupported tensors, so that
# hasattr(cpu_tensor, "__cuda_array_interface__") is False.
if not self.is_cuda:
raise AttributeError(
"Can't get __cuda_array_interface__ on non-CUDA tensor type: %s "
"If CUDA data is required use tensor.cuda() to copy tensor to device memory."
% self.type()
)
if self.is_sparse:
raise AttributeError(
"Can't get __cuda_array_interface__ on sparse type: %s "
"Use Tensor.to_dense() to convert to a dense tensor first."
% self.type()
)
# RuntimeError, matching tensor.__array__() behavior.
if self.requires_grad:
raise RuntimeError(
"Can't get __cuda_array_interface__ on Variable that requires grad. "
"If gradients aren't required, use var.detach() to get Variable that doesn't require grad."
)
# CUDA devices are little-endian and tensors are stored in native byte
# order. 1-byte entries are endian-agnostic.
typestr = {
torch.complex64: "<c8",
torch.complex128: "<c16",
torch.float16: "<f2",
torch.float32: "<f4",
torch.float64: "<f8",
torch.uint8: "|u1",
torch.int8: "|i1",
torch.int16: "<i2",
torch.int32: "<i4",
torch.int64: "<i8",
}[self.dtype]
itemsize = self.storage().element_size()
shape = tuple(self.shape)
if self.is_contiguous():
# __cuda_array_interface__ v2 requires the strides to be omitted
# (either not set or set to None) for C-contiguous arrays.
strides = None
else:
strides = tuple(s * itemsize for s in self.stride())
data_ptr = self.data_ptr() if self.numel() > 0 else 0
data = (data_ptr, False) # read-only is false
return dict(typestr=typestr, shape=shape, strides=strides, data=data, version=2)
def storage_type(self):
r"""storage_type() -> type
Returns the type of the underlying storage.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.storage_type, (self,), self)
return self.storage()._get_legacy_storage_class()
def refine_names(self, *names):
r"""Refines the dimension names of :attr:`self` according to :attr:`names`.
Refining is a special case of renaming that "lifts" unnamed dimensions.
A ``None`` dim can be refined to have any name; a named dim can only be
refined to have the same name.
Because named tensors can coexist with unnamed tensors, refining names
gives a nice way to write named-tensor-aware code that works with both
named and unnamed tensors.
:attr:`names` may contain up to one Ellipsis (``...``).
The Ellipsis is expanded greedily; it is expanded in-place to fill
:attr:`names` to the same length as ``self.dim()`` using names from the
corresponding indices of ``self.names``.
Python 2 does not support Ellipsis but one may use a string literal
instead (``'...'``).
Args:
names (iterable of str): The desired names of the output tensor. May
contain up to one Ellipsis.
Examples::
>>> imgs = torch.randn(32, 3, 128, 128)
>>> named_imgs = imgs.refine_names('N', 'C', 'H', 'W')
>>> named_imgs.names
('N', 'C', 'H', 'W')
>>> tensor = torch.randn(2, 3, 5, 7, 11)
>>> tensor = tensor.refine_names('A', ..., 'B', 'C')
>>> tensor.names
('A', None, None, 'B', 'C')
.. warning::
The named tensor API is experimental and subject to change.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.refine_names, (self,), self, *names)
names = resolve_ellipsis(names, self.names, "refine_names")
return super(Tensor, self).refine_names(names)
def align_to(self, *names):
r"""Permutes the dimensions of the :attr:`self` tensor to match the order
specified in :attr:`names`, adding size-one dims for any new names.
All of the dims of :attr:`self` must be named in order to use this method.
The resulting tensor is a view on the original tensor.
All dimension names of :attr:`self` must be present in :attr:`names`.
:attr:`names` may contain additional names that are not in ``self.names``;
the output tensor has a size-one dimension for each of those new names.
:attr:`names` may contain up to one Ellipsis (``...``).
The Ellipsis is expanded to be equal to all dimension names of :attr:`self`
that are not mentioned in :attr:`names`, in the order that they appear
in :attr:`self`.
Python 2 does not support Ellipsis but one may use a string literal
instead (``'...'``).
Args:
names (iterable of str): The desired dimension ordering of the
output tensor. May contain up to one Ellipsis that is expanded
to all unmentioned dim names of :attr:`self`.
Examples::
>>> tensor = torch.randn(2, 2, 2, 2, 2, 2)
>>> named_tensor = tensor.refine_names('A', 'B', 'C', 'D', 'E', 'F')
# Move the F and E dims to the front while keeping the rest in order
>>> named_tensor.align_to('F', 'E', ...)
.. warning::
The named tensor API is experimental and subject to change.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.align_to, (self,), self, *names)
ellipsis_idx = single_ellipsis_index(names, "align_to")
if ellipsis_idx is None:
return super(Tensor, self).align_to(names)
return super(Tensor, self).align_to(
[name for name in names if not is_ellipsis(name)], ellipsis_idx
)
def unflatten(self, dim, sizes):
r"""
unflatten(dim, sizes) -> Tensor
See :func:`torch.unflatten`.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.unflatten, (self,), self, dim, sizes)
if not sizes:
raise RuntimeError("unflatten: sizes must be non-empty")
names = None
if isinstance(sizes, OrderedDict) or (
isinstance(sizes, (tuple, list)) and isinstance(sizes[0], (tuple, list))
):
names, sizes = unzip_namedshape(sizes)
return super(Tensor, self).unflatten(dim, sizes, names)
else:
return super(Tensor, self).unflatten(dim, sizes)
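# Editor's illustrative sketch: unflatten splits one dimension into several.
# >>> torch.arange(6).unflatten(0, (2, 3)).shape
# torch.Size([2, 3])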
def rename_(self, *names, **rename_map):
"""In-place version of :meth:`~Tensor.rename`."""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.rename_, (self,), self, *names, **rename_map
)
# Note [rename_ / rename API]
# The Python API for these is different from the C++ API. In Python:
# 1) tensor.rename(*names) takes a vararglist of names
# 2) tensor.rename(**rename_map) takes a map of names to rename.
# C++ is static, making it difficult to implement similar behavior.
return update_names(self, names, rename_map, inplace=True)
def rename(self, *names, **rename_map):
"""Renames dimension names of :attr:`self`.
There are two main usages:
``self.rename(**rename_map)`` returns a view on tensor that has dims
renamed as specified in the mapping :attr:`rename_map`.
``self.rename(*names)`` returns a view on tensor, renaming all
dimensions positionally using :attr:`names`.
Use ``self.rename(None)`` to drop names on a tensor.
One cannot specify both positional args :attr:`names` and keyword args
:attr:`rename_map`.
Examples::
>>> imgs = torch.rand(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
>>> renamed_imgs = imgs.rename(N='batch', C='channels')
>>> renamed_imgs.names
('batch', 'channels', 'H', 'W')
>>> renamed_imgs = imgs.rename(None)
>>> renamed_imgs.names
(None, None, None, None)
>>> renamed_imgs = imgs.rename('batch', 'channel', 'height', 'width')
>>> renamed_imgs.names
('batch', 'channel', 'height', 'width')
.. warning::
The named tensor API is experimental and subject to change.
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.rename, (self,), self, *names, **rename_map
)
# See Note [rename_ / rename API]
return update_names(self, names, rename_map, inplace=False)
def to_sparse_coo(self):
"""Convert a tensor to :ref:`coordinate format <sparse-coo-docs>`.
Examples::
>>> dense = torch.randn(5, 5)
>>> sparse = dense.to_sparse_coo()
>>> sparse._nnz()
25
"""
return self.to_sparse()
def _update_names(self, names, inplace):
if has_torch_function_unary(self):
return handle_torch_function(
Tensor._update_names, (self,), self, names, inplace
)
# See Note [rename_ / rename API]
if inplace:
return super(Tensor, self).rename_(names)
else:
return super(Tensor, self).rename(names)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
"""
This __torch_function__ implementation wraps subclasses such that
methods called on subclasses return a subclass instance instead of
a ``torch.Tensor`` instance.
One corollary to this is that you need coverage for torch.Tensor
methods if implementing __torch_function__ for subclasses.
We recommend always calling ``super().__torch_function__`` as the base
case when doing the above.
While not mandatory, we recommend making `__torch_function__` a classmethod.
"""
if kwargs is None:
kwargs = {}
if not all(issubclass(cls, t) for t in types):
return NotImplemented
with _C.DisableTorchFunction():
ret = func(*args, **kwargs)
if func in get_default_nowrap_functions():
return ret
else:
return _convert(ret, cls)
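# Editor's illustrative sketch: because of this wrapping, operations on a plain
# Tensor subclass keep returning the subclass. `MyTensor` is a made-up name.
# >>> class MyTensor(torch.Tensor):
# ...     pass
# >>> type(MyTensor([1., 2.]) + 1) is MyTensor
# True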
__torch_dispatch__ = _C._disabled_torch_dispatch_impl
def __dlpack__(self, stream=None):
"""
Creates a DLPack `capsule <https://data-apis.org/array-api/latest/design_topics/data_interchange.html#data-interchange>`_
of the current tensor to be exported to other libraries.
This function will be called from the `from_dlpack` method
of the library that will consume the capsule. `from_dlpack` passes the current
stream to this method as part of the specification.
Args:
stream (integer or None): An optional Python integer representing a
pointer to a CUDA stream. The current stream is synchronized with
this stream before the capsule is created, and since the capsule
shares its storage with the tensor, this makes it safe to access from
both streams. If None or -1 is passed then no synchronization is performed.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__dlpack__, (self,), self, stream)
# DLPack capsules can't capture all of PyTorch's semantics,
# so we prohibit exporting tensors that would lose their properties like
# requires_grad and having the conjugate bit set.
if self.requires_grad:
raise RuntimeError(
"Can't export tensors that require gradient, use tensor.detach()"
)
if self.is_conj():
raise RuntimeError("Can't export tensors with the conjugate bit set")
if self.layout != torch.strided:
raise RuntimeError(
"Can't export tensors with layout other than torch.strided"
)
if stream is not None and type(stream) is not int:
# Stream pointers in CUDA/ROCm are uniquely numbered and can
# be retrieved from their integer value.
raise TypeError("stream must be ``int`` or ``none``")
elif stream is not None and stream != -1:
if self.device.type == "cuda":
stream = torch.cuda.ExternalStream(stream)
# Only synchronize on different streams
if stream != torch.cuda.current_stream():
event = torch.cuda.Event()
event.record(torch.cuda.current_stream())
stream.wait_event(event)
return torch.to_dlpack(self)
def __dlpack_device__(self) -> Tuple[enum.IntEnum, int]:
# Avoid circular import
from torch.utils.dlpack import DLDeviceType
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__dlpack_device__, (self,), self)
idx = self.device.index if self.device.index is not None else 0
if self.device.type == "cuda" and torch.version.hip is not None:
device_type = DLDeviceType.kDLROCM
elif self.device.type == "cpu" and self.is_pinned():
device_type = DLDeviceType.kDLCPUPinned
elif self.device.type == "cuda":
device_type = DLDeviceType.kDLGPU
elif self.device.type == "cpu":
device_type = DLDeviceType.kDLCPU
else:
raise ValueError(
"Unknown device type {} for Dlpack".format(self.device.type)
)
return (device_type, idx)
__module__ = "torch"
def _convert(ret, cls):
if cls is Tensor:
return ret
if isinstance(ret, Tensor) and not isinstance(ret, cls):
ret = ret.as_subclass(cls)
if isinstance(ret, (tuple, list)):
# Also handles things like namedtuples
ret = type(ret)(_convert(r, cls) for r in ret)
return ret
|
pytorch-master
|
torch/_tensor.py
|
import contextlib
from typing import Generator
import warnings
from torch._C import default_generator
import torch
def set_rng_state(new_state: torch.Tensor) -> None:
r"""Sets the random number generator state.
.. note:: This function only works for CPU. For CUDA, please use
torch.manual_seed(seed), which works for both CPU and CUDA.
Args:
new_state (torch.ByteTensor): The desired state
"""
default_generator.set_state(new_state)
def get_rng_state() -> torch.Tensor:
r"""Returns the random number generator state as a `torch.ByteTensor`."""
return default_generator.get_state()
def manual_seed(seed) -> torch._C.Generator:
r"""Sets the seed for generating random numbers. Returns a
`torch.Generator` object.
Args:
seed (int): The desired seed. Value must be within the inclusive range
`[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
is raised. Negative inputs are remapped to positive values with the formula
`0xffff_ffff_ffff_ffff + seed`.
"""
seed = int(seed)
import torch.cuda
if not torch.cuda._is_in_bad_fork():
torch.cuda.manual_seed_all(seed)
return default_generator.manual_seed(seed)
def seed() -> int:
r"""Sets the seed for generating random numbers to a non-deterministic
random number. Returns a 64 bit number used to seed the RNG.
"""
seed = default_generator.seed()
import torch.cuda
if not torch.cuda._is_in_bad_fork():
torch.cuda.manual_seed_all(seed)
return seed
def initial_seed() -> int:
r"""Returns the initial seed for generating random numbers as a
Python `long`.
"""
return default_generator.initial_seed()
_fork_rng_warned_already = False
@contextlib.contextmanager
def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices") -> Generator:
"""
Forks the RNG, so that when you return, the RNG is reset
to the state that it was previously in.
Args:
devices (iterable of CUDA IDs): CUDA devices for which to fork
the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates
on all devices, but will emit a warning if your machine has a lot
of devices, since this function will run very slowly in that case.
If you explicitly specify devices, this warning will be suppressed.
enabled (bool): if ``False``, the RNG is not forked. This is a convenience
argument for easily disabling the context manager without having
to delete it and unindent your Python code under it.
"""
import torch.cuda
global _fork_rng_warned_already
# Internal arguments:
# _caller: the function which called fork_rng, which the user used
# _devices_kw: the devices keyword of _caller
if not enabled:
yield
return
if devices is None:
num_devices = torch.cuda.device_count()
if num_devices > 1 and not _fork_rng_warned_already:
warnings.warn(
("CUDA reports that you have {num_devices} available devices, and you "
"have used {caller} without explicitly specifying which devices are being used. "
"For safety, we initialize *every* CUDA device by default, which "
"can be quite slow if you have a lot of GPUs. If you know that you are only "
"making use of a few CUDA devices, set the environment variable CUDA_VISIBLE_DEVICES "
"or the '{devices_kw}' keyword argument of {caller} with the set of devices "
"you are actually using. For example, if you are using CPU only, "
"set CUDA_VISIBLE_DEVICES= or devices=[]; if you are using "
"GPU 0 only, set CUDA_VISIBLE_DEVICES=0 or devices=[0]. To initialize "
"all devices and suppress this warning, set the '{devices_kw}' keyword argument "
"to `range(torch.cuda.device_count())`."
).format(num_devices=num_devices, caller=_caller, devices_kw=_devices_kw))
_fork_rng_warned_already = True
devices = list(range(num_devices))
else:
# Protect against user passing us a generator; we need to traverse this
# multiple times but a generator will be exhausted upon first traversal
devices = list(devices)
cpu_rng_state = torch.get_rng_state()
gpu_rng_states = []
for device in devices:
gpu_rng_states.append(torch.cuda.get_rng_state(device))
try:
yield
finally:
torch.set_rng_state(cpu_rng_state)
for device, gpu_rng_state in zip(devices, gpu_rng_states):
torch.cuda.set_rng_state(gpu_rng_state, device)
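# Editor's illustrative sketch: draws made inside fork_rng() do not advance the
# RNG state observed outside the context manager.
# >>> before = torch.get_rng_state()
# >>> with fork_rng(devices=[]):
# ...     _ = torch.rand(3)
# >>> torch.equal(before, torch.get_rng_state())
# True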
|
pytorch-master
|
torch/random.py
|
import errno
import hashlib
import json
import os
import re
import shutil
import sys
import tempfile
import torch
import warnings
import zipfile
from pathlib import Path
from typing import Callable, Dict, Optional, Union, Any
from urllib.error import HTTPError
from urllib.request import urlopen, Request
from urllib.parse import urlparse # noqa: F401
try:
from tqdm.auto import tqdm # automatically select proper tqdm submodule if available
except ImportError:
try:
from tqdm import tqdm
except ImportError:
# fake tqdm if it's not installed
class tqdm(object): # type: ignore[no-redef]
def __init__(self, total=None, disable=False,
unit=None, unit_scale=None, unit_divisor=None):
self.total = total
self.disable = disable
self.n = 0
# ignore unit, unit_scale, unit_divisor; they're just for real tqdm
def update(self, n):
if self.disable:
return
self.n += n
if self.total is None:
sys.stderr.write("\r{0:.1f} bytes".format(self.n))
else:
sys.stderr.write("\r{0:.1f}%".format(100 * self.n / float(self.total)))
sys.stderr.flush()
def close(self):
self.disable = True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.disable:
return
sys.stderr.write('\n')
__all__ = [
'download_url_to_file',
'get_dir',
'help',
'list',
'load',
'load_state_dict_from_url',
'set_dir',
]
# matches bfd8deac from resnet18-bfd8deac.pth
HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')
_TRUSTED_REPO_OWNERS = ("facebookresearch", "facebookincubator", "pytorch", "fairinternal")
ENV_GITHUB_TOKEN = 'GITHUB_TOKEN'
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
VAR_DEPENDENCY = 'dependencies'
MODULE_HUBCONF = 'hubconf.py'
READ_DATA_CHUNK = 8192
_hub_dir = None
# Copied from tools/shared/module_loader to be included in torch package
def _import_module(name, path):
import importlib.util
from importlib.abc import Loader
spec = importlib.util.spec_from_file_location(name, path)
assert spec is not None
module = importlib.util.module_from_spec(spec)
assert isinstance(spec.loader, Loader)
spec.loader.exec_module(module)
return module
def _remove_if_exists(path):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
def _git_archive_link(repo_owner, repo_name, ref):
# See https://docs.github.com/en/rest/reference/repos#download-a-repository-archive-zip
return f"https://github.com/{repo_owner}/{repo_name}/zipball/{ref}"
def _load_attr_from_module(module, func_name):
# Check if callable is defined in the module
if func_name not in dir(module):
return None
return getattr(module, func_name)
def _get_torch_home():
torch_home = os.path.expanduser(
os.getenv(ENV_TORCH_HOME,
os.path.join(os.getenv(ENV_XDG_CACHE_HOME,
DEFAULT_CACHE_DIR), 'torch')))
return torch_home
def _parse_repo_info(github):
if ':' in github:
repo_info, ref = github.split(':')
else:
repo_info, ref = github, None
repo_owner, repo_name = repo_info.split('/')
if ref is None:
# The ref wasn't specified by the user, so we need to figure out the
# default branch: main or master. Our assumption is that if main exists
# then it's the default branch, otherwise it's master.
try:
with urlopen(f"https://github.com/{repo_owner}/{repo_name}/tree/main/"):
ref = 'main'
except HTTPError as e:
if e.code == 404:
ref = 'master'
else:
raise
return repo_owner, repo_name, ref
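# Editor's illustrative sketch: an explicit ref after ':' bypasses the default-branch
# lookup; 'pytorch/vision:main' below is just an example spec.
# >>> _parse_repo_info('pytorch/vision:main')
# ('pytorch', 'vision', 'main')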
def _read_url(url):
with urlopen(url) as r:
return r.read().decode(r.headers.get_content_charset('utf-8'))
def _validate_not_a_forked_repo(repo_owner, repo_name, ref):
# Use urlopen to avoid depending on local git.
headers = {'Accept': 'application/vnd.github.v3+json'}
token = os.environ.get(ENV_GITHUB_TOKEN)
if token is not None:
headers['Authorization'] = f'token {token}'
for url_prefix in (
f'https://api.github.com/repos/{repo_owner}/{repo_name}/branches',
f'https://api.github.com/repos/{repo_owner}/{repo_name}/tags'):
page = 0
while True:
page += 1
url = f'{url_prefix}?per_page=100&page={page}'
response = json.loads(_read_url(Request(url, headers=headers)))
# Empty response means no more data to process
if not response:
break
for br in response:
if br['name'] == ref or br['commit']['sha'].startswith(ref):
return
raise ValueError(f'Cannot find {ref} in https://github.com/{repo_owner}/{repo_name}. '
'If it\'s a commit from a forked repo, please call hub.load() with forked repo directly.')
def _get_cache_or_reload(github, force_reload, trust_repo, calling_fn, verbose=True, skip_validation=False):
# Setup hub_dir to save downloaded files
hub_dir = get_dir()
if not os.path.exists(hub_dir):
os.makedirs(hub_dir)
# Parse github repo information
repo_owner, repo_name, ref = _parse_repo_info(github)
# Github allows branch name with slash '/',
# this causes confusion with path on both Linux and Windows.
# Backslash is not allowed in Github branch names, so no need
# to worry about it.
normalized_br = ref.replace('/', '_')
# Github renames folder repo-v1.x.x to repo-1.x.x.
# We don't know the repo folder name until we download the zip file
# and inspect the name from it.
# To check if cached repo exists, we need to normalize folder names.
owner_name_branch = '_'.join([repo_owner, repo_name, normalized_br])
repo_dir = os.path.join(hub_dir, owner_name_branch)
# Check that the repo is in the trusted list
_check_repo_is_trusted(repo_owner, repo_name, owner_name_branch, trust_repo=trust_repo, calling_fn=calling_fn)
use_cache = (not force_reload) and os.path.exists(repo_dir)
if use_cache:
if verbose:
sys.stderr.write('Using cache found in {}\n'.format(repo_dir))
else:
# Validate the tag/branch is from the original repo instead of a forked repo
if not skip_validation:
_validate_not_a_forked_repo(repo_owner, repo_name, ref)
cached_file = os.path.join(hub_dir, normalized_br + '.zip')
_remove_if_exists(cached_file)
try:
url = _git_archive_link(repo_owner, repo_name, ref)
sys.stderr.write('Downloading: \"{}\" to {}\n'.format(url, cached_file))
download_url_to_file(url, cached_file, progress=False)
except HTTPError as err:
if err.code == 300:
# Getting a 300 Multiple Choices error likely means that the ref is both a tag and a branch
# in the repo. This can be disambiguated by explicitly using refs/heads/ or refs/tags/
# See https://git-scm.com/book/en/v2/Git-Internals-Git-References
# Here, we do the same as git: we throw a warning, and assume the user wanted the branch
warnings.warn(
f"The ref {ref} is ambiguous. Perhaps it is both a tag and a branch in the repo? "
"Torchhub will now assume that it's a branch. "
"You can disambiguate tags and branches by explicitly passing refs/heads/branch_name or "
"refs/tags/tag_name as the ref. That might require using skip_validation=True."
)
disambiguated_branch_ref = f"refs/heads/{ref}"
url = _git_archive_link(repo_owner, repo_name, ref=disambiguated_branch_ref)
download_url_to_file(url, cached_file, progress=False)
else:
raise
with zipfile.ZipFile(cached_file) as cached_zipfile:
extracted_repo_name = cached_zipfile.infolist()[0].filename
extracted_repo = os.path.join(hub_dir, extracted_repo_name)
_remove_if_exists(extracted_repo)
# Unzip the code and rename the base folder
cached_zipfile.extractall(hub_dir)
_remove_if_exists(cached_file)
_remove_if_exists(repo_dir)
shutil.move(extracted_repo, repo_dir) # rename the repo
return repo_dir
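# A rough sketch of the resulting on-disk layout under get_dir(), assuming the
# hypothetical ref 'pytorch/vision:v0.10.0':
#
#   <hub_dir>/pytorch_vision_v0.10.0/             <- repo_dir returned above
#   <hub_dir>/pytorch_vision_v0.10.0/hubconf.py   <- consumed by list()/help()/load()
#   <hub_dir>/trusted_list                        <- maintained by _check_repo_is_trusted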
def _check_repo_is_trusted(repo_owner, repo_name, owner_name_branch, trust_repo, calling_fn="load"):
hub_dir = get_dir()
filepath = os.path.join(hub_dir, "trusted_list")
if not os.path.exists(filepath):
Path(filepath).touch()
with open(filepath, 'r') as file:
trusted_repos = tuple(line.strip() for line in file)
# To minimize friction of introducing the new trust_repo mechanism, we consider that
# if a repo was already downloaded by torchhub, then it is already trusted (even if it's not in the allowlist)
trusted_repos_legacy = next(os.walk(hub_dir))[1]
owner_name = '_'.join([repo_owner, repo_name])
is_trusted = (
owner_name in trusted_repos
or owner_name_branch in trusted_repos_legacy
or repo_owner in _TRUSTED_REPO_OWNERS
)
# TODO: Remove `None` option in 1.14 and change the default to "check"
if trust_repo is None:
if not is_trusted:
warnings.warn(
"You are about to download and run code from an untrusted repository. In a future release, this won't "
"be allowed. To add the repository to your trusted list, change the command to {calling_fn}(..., "
"trust_repo=False) and a command prompt will appear asking for an explicit confirmation of trust, "
f"or {calling_fn}(..., trust_repo=True), which will assume that the prompt is to be answered with "
f"'yes'. You can also use {calling_fn}(..., trust_repo='check') which will only prompt for "
f"confirmation if the repo is not already trusted. This will eventually be the default behaviour")
return
if (trust_repo is False) or (trust_repo == "check" and not is_trusted):
response = input(
f"The repository {owner_name} does not belong to the list of trusted repositories and as such cannot be downloaded. "
"Do you trust this repository and wish to add it to the trusted list of repositories (y/N)?")
if response.lower() in ("y", "yes"):
if is_trusted:
print("The repository is already trusted.")
elif response.lower() in ("n", "no", ""):
raise Exception("Untrusted repository.")
else:
raise ValueError(f"Unrecognized response {response}.")
# At this point we're sure that the user trusts the repo (or wants to trust it)
if not is_trusted:
with open(filepath, "a") as file:
file.write(owner_name + "\n")
def _check_module_exists(name):
import importlib.util
return importlib.util.find_spec(name) is not None
def _check_dependencies(m):
dependencies = _load_attr_from_module(m, VAR_DEPENDENCY)
if dependencies is not None:
missing_deps = [pkg for pkg in dependencies if not _check_module_exists(pkg)]
if len(missing_deps):
raise RuntimeError('Missing dependencies: {}'.format(', '.join(missing_deps)))
def _load_entry_from_hubconf(m, model):
if not isinstance(model, str):
raise ValueError('Invalid input: model should be a string with the function name')
# Note that if a missing dependency is imported at top level of hubconf, it will
# throw before this function. It's a chicken and egg situation where we have to
# load hubconf to know what the dependencies are, but importing hubconf requires
# the missing package. This is fine; Python will throw a proper error message for users.
_check_dependencies(m)
func = _load_attr_from_module(m, model)
if func is None or not callable(func):
raise RuntimeError('Cannot find callable {} in hubconf'.format(model))
return func
def get_dir():
r"""
Get the Torch Hub cache directory used for storing downloaded models & weights.
If :func:`~torch.hub.set_dir` is not called, default path is ``$TORCH_HOME/hub`` where
environment variable ``$TORCH_HOME`` defaults to ``$XDG_CACHE_HOME/torch``.
``$XDG_CACHE_HOME`` follows the X Desktop Group specification of the Linux
filesystem layout, with a default value ``~/.cache`` if the environment
variable is not set.
"""
# Issue warning to move data if old env is set
if os.getenv('TORCH_HUB'):
warnings.warn('TORCH_HUB is deprecated, please use env TORCH_HOME instead')
if _hub_dir is not None:
return _hub_dir
return os.path.join(_get_torch_home(), 'hub')
def set_dir(d):
r"""
Optionally set the Torch Hub directory used to save downloaded models & weights.
Args:
d (str): path to a local folder to save downloaded models & weights.
"""
global _hub_dir
_hub_dir = os.path.expanduser(d)
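# A minimal usage sketch, assuming an arbitrary writable path:
#
#   >>> torch.hub.set_dir('/tmp/torch_hub_cache')
#   >>> torch.hub.get_dir()
#   '/tmp/torch_hub_cache'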
def list(github, force_reload=False, skip_validation=False, trust_repo=None):
r"""
List all callable entrypoints available in the repo specified by ``github``.
Args:
github (str): a string with format "repo_owner/repo_name[:ref]" with an optional
ref (tag or branch). If ``ref`` is not specified, the default branch is assumed to be ``main`` if
it exists, and otherwise ``master``.
Example: 'pytorch/vision:0.10'
force_reload (bool, optional): whether to discard the existing cache and force a fresh download.
Default is ``False``.
skip_validation (bool, optional): if ``False``, torchhub will check that the branch or commit
specified by the ``github`` argument properly belongs to the repo owner. This will make
requests to the GitHub API; you can specify a non-default GitHub token by setting the
``GITHUB_TOKEN`` environment variable. Default is ``False``.
trust_repo (bool, str or None): ``"check"``, ``True``, ``False`` or ``None``.
This parameter was introduced in v1.12 and helps ensure that users
only run code from repos that they trust.
- If ``False``, a prompt will ask the user whether the repo should
be trusted.
- If ``True``, the repo will be added to the trusted list and loaded
without requiring explicit confirmation.
- If ``"check"``, the repo will be checked against the list of
trusted repos in the cache. If it is not present in that list, the
behaviour will fall back onto the ``trust_repo=False`` option.
- If ``None``: this will raise a warning, inviting the user to set
``trust_repo`` to either ``False``, ``True`` or ``"check"``. This
is only present for backward compatibility and will be removed in
v1.14.
Default is ``None`` and will eventually change to ``"check"`` in v1.14.
Returns:
list: The available callable entrypoints
Example:
>>> entrypoints = torch.hub.list('pytorch/vision', force_reload=True)
"""
repo_dir = _get_cache_or_reload(github, force_reload, trust_repo, "list", verbose=True,
skip_validation=skip_validation)
sys.path.insert(0, repo_dir)
hubconf_path = os.path.join(repo_dir, MODULE_HUBCONF)
hub_module = _import_module(MODULE_HUBCONF, hubconf_path)
sys.path.remove(repo_dir)
# We treat functions starting with '_' as internal helper functions
entrypoints = [f for f in dir(hub_module) if callable(getattr(hub_module, f)) and not f.startswith('_')]
return entrypoints
def help(github, model, force_reload=False, skip_validation=False, trust_repo=None):
r"""
Show the docstring of entrypoint ``model``.
Args:
github (str): a string with format <repo_owner/repo_name[:ref]> with an optional
ref (a tag or a branch). If ``ref`` is not specified, the default branch is assumed
to be ``main`` if it exists, and otherwise ``master``.
Example: 'pytorch/vision:0.10'
model (str): a string of entrypoint name defined in repo's ``hubconf.py``
force_reload (bool, optional): whether to discard the existing cache and force a fresh download.
Default is ``False``.
skip_validation (bool, optional): if ``False``, torchhub will check that the ref
specified by the ``github`` argument properly belongs to the repo owner. This will make
requests to the GitHub API; you can specify a non-default GitHub token by setting the
``GITHUB_TOKEN`` environment variable. Default is ``False``.
trust_repo (bool, str or None): ``"check"``, ``True``, ``False`` or ``None``.
This parameter was introduced in v1.12 and helps ensure that users
only run code from repos that they trust.
- If ``False``, a prompt will ask the user whether the repo should
be trusted.
- If ``True``, the repo will be added to the trusted list and loaded
without requiring explicit confirmation.
- If ``"check"``, the repo will be checked against the list of
trusted repos in the cache. If it is not present in that list, the
behaviour will fall back onto the ``trust_repo=False`` option.
- If ``None``: this will raise a warning, inviting the user to set
``trust_repo`` to either ``False``, ``True`` or ``"check"``. This
is only present for backward compatibility and will be removed in
v1.14.
Default is ``None`` and will eventually change to ``"check"`` in v1.14.
Example:
>>> print(torch.hub.help('pytorch/vision', 'resnet18', force_reload=True))
"""
repo_dir = _get_cache_or_reload(github, force_reload, trust_repo, "help", verbose=True,
skip_validation=skip_validation)
sys.path.insert(0, repo_dir)
hubconf_path = os.path.join(repo_dir, MODULE_HUBCONF)
hub_module = _import_module(MODULE_HUBCONF, hubconf_path)
sys.path.remove(repo_dir)
entry = _load_entry_from_hubconf(hub_module, model)
return entry.__doc__
def load(repo_or_dir, model, *args, source='github', trust_repo=None, force_reload=False, verbose=True,
skip_validation=False,
**kwargs):
r"""
Load a model from a github repo or a local directory.
Note: Loading a model is the typical use case, but this can also be used to
load other objects such as tokenizers, loss functions, etc.
If ``source`` is 'github', ``repo_or_dir`` is expected to be
of the form ``repo_owner/repo_name[:ref]`` with an optional
ref (a tag or a branch).
If ``source`` is 'local', ``repo_or_dir`` is expected to be a
path to a local directory.
Args:
repo_or_dir (str): If ``source`` is 'github',
this should correspond to a github repo with format ``repo_owner/repo_name[:ref]`` with
an optional ref (tag or branch), for example 'pytorch/vision:0.10'. If ``ref`` is not specified,
the default branch is assumed to be ``main`` if it exists, and otherwise ``master``.
If ``source`` is 'local' then it should be a path to a local directory.
model (str): the name of a callable (entrypoint) defined in the
repo/dir's ``hubconf.py``.
*args (optional): the corresponding args for callable ``model``.
source (str, optional): 'github' or 'local'. Specifies how
``repo_or_dir`` is to be interpreted. Default is 'github'.
trust_repo (bool, str or None): ``"check"``, ``True``, ``False`` or ``None``.
This parameter was introduced in v1.12 and helps ensure that users
only run code from repos that they trust.
- If ``False``, a prompt will ask the user whether the repo should
be trusted.
- If ``True``, the repo will be added to the trusted list and loaded
without requiring explicit confirmation.
- If ``"check"``, the repo will be checked against the list of
trusted repos in the cache. If it is not present in that list, the
behaviour will fall back onto the ``trust_repo=False`` option.
- If ``None``: this will raise a warning, inviting the user to set
``trust_repo`` to either ``False``, ``True`` or ``"check"``. This
is only present for backward compatibility and will be removed in
v1.14.
Default is ``None`` and will eventually change to ``"check"`` in v1.14.
force_reload (bool, optional): whether to force a fresh download of
the github repo unconditionally. Does not have any effect if
``source = 'local'``. Default is ``False``.
verbose (bool, optional): If ``False``, mute messages about hitting
local caches. Note that the message about first download cannot be
muted. Does not have any effect if ``source = 'local'``.
Default is ``True``.
skip_validation (bool, optional): if ``False``, torchhub will check that the branch or commit
specified by the ``github`` argument properly belongs to the repo owner. This will make
requests to the GitHub API; you can specify a non-default GitHub token by setting the
``GITHUB_TOKEN`` environment variable. Default is ``False``.
**kwargs (optional): the corresponding kwargs for callable ``model``.
Returns:
The output of the ``model`` callable when called with the given
``*args`` and ``**kwargs``.
Example:
>>> # from a github repo
>>> repo = 'pytorch/vision'
>>> model = torch.hub.load(repo, 'resnet50', pretrained=True)
>>> # from a local directory
>>> path = '/some/local/path/pytorch/vision'
>>> # xdoctest: +SKIP
>>> model = torch.hub.load(path, 'resnet50', pretrained=True)
"""
source = source.lower()
if source not in ('github', 'local'):
raise ValueError(
f'Unknown source: "{source}". Allowed values: "github" | "local".')
if source == 'github':
repo_or_dir = _get_cache_or_reload(repo_or_dir, force_reload, trust_repo, "load",
verbose=verbose, skip_validation=skip_validation)
model = _load_local(repo_or_dir, model, *args, **kwargs)
return model
def _load_local(hubconf_dir, model, *args, **kwargs):
r"""
Load a model from a local directory with a ``hubconf.py``.
Args:
hubconf_dir (str): path to a local directory that contains a
``hubconf.py``.
model (str): name of an entrypoint defined in the directory's
``hubconf.py``.
*args (optional): the corresponding args for callable ``model``.
**kwargs (optional): the corresponding kwargs for callable ``model``.
Returns:
a single model with corresponding pretrained weights.
Example:
>>> path = '/some/local/path/pytorch/vision'
>>> # xdoctest: +SKIP
>>> model = _load_local(path, 'resnet50', pretrained=True)
"""
sys.path.insert(0, hubconf_dir)
hubconf_path = os.path.join(hubconf_dir, MODULE_HUBCONF)
hub_module = _import_module(MODULE_HUBCONF, hubconf_path)
entry = _load_entry_from_hubconf(hub_module, model)
model = entry(*args, **kwargs)
sys.path.remove(hubconf_dir)
return model
def download_url_to_file(url, dst, hash_prefix=None, progress=True):
r"""Download object at the given URL to a local path.
Args:
url (str): URL of the object to download
dst (str): Full path where object will be saved, e.g. ``/tmp/temporary_file``
hash_prefix (str, optional): If not None, the SHA256 hash of the downloaded file should start with ``hash_prefix``.
Default: None
progress (bool, optional): whether or not to display a progress bar to stderr
Default: True
Example:
>>> torch.hub.download_url_to_file('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth', '/tmp/temporary_file')
"""
file_size = None
req = Request(url, headers={"User-Agent": "torch.hub"})
u = urlopen(req)
meta = u.info()
if hasattr(meta, 'getheaders'):
content_length = meta.getheaders("Content-Length")
else:
content_length = meta.get_all("Content-Length")
if content_length is not None and len(content_length) > 0:
file_size = int(content_length[0])
# We deliberately save it in a temp file and move it after the
# download is complete. This prevents a local working checkpoint
# from being overwritten by a broken download.
dst = os.path.expanduser(dst)
dst_dir = os.path.dirname(dst)
f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
try:
if hash_prefix is not None:
sha256 = hashlib.sha256()
with tqdm(total=file_size, disable=not progress,
unit='B', unit_scale=True, unit_divisor=1024) as pbar:
while True:
buffer = u.read(8192)
if len(buffer) == 0:
break
f.write(buffer)
if hash_prefix is not None:
sha256.update(buffer)
pbar.update(len(buffer))
f.close()
if hash_prefix is not None:
digest = sha256.hexdigest()
if digest[:len(hash_prefix)] != hash_prefix:
raise RuntimeError('invalid hash value (expected "{}", got "{}")'
.format(hash_prefix, digest))
shutil.move(f.name, dst)
finally:
f.close()
if os.path.exists(f.name):
os.remove(f.name)
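# A minimal usage sketch with hash checking; hash_prefix only needs to be a
# prefix of the file's full SHA256 hex digest, a mismatch raises RuntimeError,
# and the finally-block above removes the temporary file. The destination path
# is an arbitrary example.
#
#   >>> torch.hub.download_url_to_file(
#   ...     'https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth',
#   ...     '/tmp/resnet18.pth', hash_prefix='5c106cde')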
# Hub used to automatically extract zip files manually compressed by users.
# The legacy zip format expects only one file produced by torch.save() < 1.6 inside the zip.
# We should remove this support since the zipfile format is now the default for torch.save().
def _is_legacy_zip_format(filename):
if zipfile.is_zipfile(filename):
infolist = zipfile.ZipFile(filename).infolist()
return len(infolist) == 1 and not infolist[0].is_dir()
return False
def _legacy_zip_load(filename, model_dir, map_location):
warnings.warn('Falling back to the old format < 1.6. This support will be '
'deprecated in favor of default zipfile format introduced in 1.6. '
'Please redo torch.save() to save it in the new zipfile format.')
# Note: extractall() overwrites existing files by default, so there is no need to clean up beforehand.
# We deliberately don't handle tarfile here since our legacy serialization format was in tar.
# E.g. resnet18-5c106cde.pth which is widely used.
with zipfile.ZipFile(filename) as f:
members = f.infolist()
if len(members) != 1:
raise RuntimeError('Only one file (not a dir) is allowed in the zipfile')
f.extractall(model_dir)
extracted_name = members[0].filename
extracted_file = os.path.join(model_dir, extracted_name)
return torch.load(extracted_file, map_location=map_location)
def load_state_dict_from_url(
url: str,
model_dir: Optional[str] = None,
map_location: Optional[Union[Callable[[str], str], Dict[str, str]]] = None,
progress: bool = True,
check_hash: bool = False,
file_name: Optional[str] = None
) -> Dict[str, Any]:
r"""Loads the Torch serialized object at the given URL.
If the downloaded file is a zip file, it will be automatically
decompressed.
If the object is already present in `model_dir`, it's deserialized and
returned.
The default value of ``model_dir`` is ``<hub_dir>/checkpoints`` where
``hub_dir`` is the directory returned by :func:`~torch.hub.get_dir`.
Args:
url (str): URL of the object to download
model_dir (str, optional): directory in which to save the object
map_location (optional): a function or a dict specifying how to remap storage locations (see torch.load)
progress (bool, optional): whether or not to display a progress bar to stderr.
Default: True
check_hash(bool, optional): If True, the filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
Default: False
file_name (str, optional): name for the downloaded file. Filename from ``url`` will be used if not set.
Example:
>>> state_dict = torch.hub.load_state_dict_from_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
"""
# Issue warning to move data if old env is set
if os.getenv('TORCH_MODEL_ZOO'):
warnings.warn('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')
if model_dir is None:
hub_dir = get_dir()
model_dir = os.path.join(hub_dir, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
parts = urlparse(url)
filename = os.path.basename(parts.path)
if file_name is not None:
filename = file_name
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = None
if check_hash:
r = HASH_REGEX.search(filename) # r is Optional[Match[str]]
hash_prefix = r.group(1) if r else None
download_url_to_file(url, cached_file, hash_prefix, progress=progress)
if _is_legacy_zip_format(cached_file):
return _legacy_zip_load(cached_file, model_dir, map_location)
return torch.load(cached_file, map_location=map_location)
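# A minimal sketch of check_hash, reusing the URL from the example above: the
# filename embeds a SHA256 prefix ('5c106cde') that HASH_REGEX extracts and
# download_url_to_file then verifies.
#
#   >>> state_dict = torch.hub.load_state_dict_from_url(
#   ...     'https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth',
#   ...     check_hash=True)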
|
pytorch-master
|
torch/hub.py
|
import math
from typing import Optional
import torch
from torch._six import inf
class __PrinterOptions(object):
precision: int = 4
threshold: float = 1000
edgeitems: int = 3
linewidth: int = 80
sci_mode: Optional[bool] = None
PRINT_OPTS = __PrinterOptions()
# We could use **kwargs, but this will give better docs
def set_printoptions(
precision=None,
threshold=None,
edgeitems=None,
linewidth=None,
profile=None,
sci_mode=None,
):
r"""Set options for printing. Items shamelessly taken from NumPy
Args:
precision: Number of digits of precision for floating point output
(default = 4).
threshold: Total number of array elements which trigger summarization
rather than full `repr` (default = 1000).
edgeitems: Number of array items in summary at beginning and end of
each dimension (default = 3).
linewidth: The number of characters per line for the purpose of
inserting line breaks (default = 80). Thresholded matrices will
ignore this parameter.
profile: Sane defaults for pretty printing. Can override with any of
the above options. (any one of `default`, `short`, `full`)
sci_mode: Enable (True) or disable (False) scientific notation. If
None (default) is specified, the value is defined by
`torch._tensor_str._Formatter`. This value is automatically chosen
by the framework.
Example::
>>> torch.set_printoptions(precision=2)
>>> torch.tensor([1.12345])
tensor([1.12])
>>> torch.set_printoptions(threshold=5)
>>> torch.arange(10)
tensor([0, 1, 2, ..., 7, 8, 9])
"""
if profile is not None:
if profile == "default":
PRINT_OPTS.precision = 4
PRINT_OPTS.threshold = 1000
PRINT_OPTS.edgeitems = 3
PRINT_OPTS.linewidth = 80
elif profile == "short":
PRINT_OPTS.precision = 2
PRINT_OPTS.threshold = 1000
PRINT_OPTS.edgeitems = 2
PRINT_OPTS.linewidth = 80
elif profile == "full":
PRINT_OPTS.precision = 4
PRINT_OPTS.threshold = inf
PRINT_OPTS.edgeitems = 3
PRINT_OPTS.linewidth = 80
if precision is not None:
PRINT_OPTS.precision = precision
if threshold is not None:
PRINT_OPTS.threshold = threshold
if edgeitems is not None:
PRINT_OPTS.edgeitems = edgeitems
if linewidth is not None:
PRINT_OPTS.linewidth = linewidth
PRINT_OPTS.sci_mode = sci_mode
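# A minimal sketch of combining a profile with an explicit override: the profile
# is applied first, then any keyword given alongside it wins.
#
#   >>> torch.set_printoptions(profile="short", precision=6)
#   # effective options: precision=6, threshold=1000, edgeitems=2, linewidth=80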
def tensor_totype(t):
dtype = torch.float if t.is_mps else torch.double
return t.to(dtype=dtype)
class _Formatter(object):
def __init__(self, tensor):
self.floating_dtype = tensor.dtype.is_floating_point
self.int_mode = True
self.sci_mode = False
self.max_width = 1
with torch.no_grad():
tensor_view = tensor.reshape(-1)
if not self.floating_dtype:
for value in tensor_view:
value_str = "{}".format(value)
self.max_width = max(self.max_width, len(value_str))
else:
nonzero_finite_vals = torch.masked_select(
tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0)
)
if nonzero_finite_vals.numel() == 0:
# no valid number, do nothing
return
# Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
nonzero_finite_abs = tensor_totype(nonzero_finite_vals.abs())
nonzero_finite_min = tensor_totype(nonzero_finite_abs.min())
nonzero_finite_max = tensor_totype(nonzero_finite_abs.max())
for value in nonzero_finite_vals:
if value != torch.ceil(value):
self.int_mode = False
break
if self.int_mode:
# in int_mode for floats, all numbers are integers, and we append a decimal to finite values
# to indicate that the tensor is of floating type. Add 1 to the len to account for this.
if (
nonzero_finite_max / nonzero_finite_min > 1000.0
or nonzero_finite_max > 1.0e8
):
self.sci_mode = True
for value in nonzero_finite_vals:
value_str = (
("{{:.{}e}}").format(PRINT_OPTS.precision).format(value)
)
self.max_width = max(self.max_width, len(value_str))
else:
for value in nonzero_finite_vals:
value_str = ("{:.0f}").format(value)
self.max_width = max(self.max_width, len(value_str) + 1)
else:
# Check if scientific representation should be used.
if (
nonzero_finite_max / nonzero_finite_min > 1000.0
or nonzero_finite_max > 1.0e8
or nonzero_finite_min < 1.0e-4
):
self.sci_mode = True
for value in nonzero_finite_vals:
value_str = (
("{{:.{}e}}").format(PRINT_OPTS.precision).format(value)
)
self.max_width = max(self.max_width, len(value_str))
else:
for value in nonzero_finite_vals:
value_str = (
("{{:.{}f}}").format(PRINT_OPTS.precision).format(value)
)
self.max_width = max(self.max_width, len(value_str))
if PRINT_OPTS.sci_mode is not None:
self.sci_mode = PRINT_OPTS.sci_mode
def width(self):
return self.max_width
def format(self, value):
if self.floating_dtype:
if self.sci_mode:
ret = (
("{{:{}.{}e}}")
.format(self.max_width, PRINT_OPTS.precision)
.format(value)
)
elif self.int_mode:
ret = "{:.0f}".format(value)
if not (math.isinf(value) or math.isnan(value)):
ret += "."
else:
ret = ("{{:.{}f}}").format(PRINT_OPTS.precision).format(value)
else:
ret = "{}".format(value)
return (self.max_width - len(ret)) * " " + ret
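# A minimal sketch of the formatting heuristics above, assuming default print
# options (precision=4):
#
#   torch.tensor([1., 2., 3.])   -> int_mode, printed as tensor([1., 2., 3.])
#   torch.tensor([1e-5, 1.])     -> sci_mode (min < 1e-4), printed in e-notation
#   torch.tensor([1., 2e9])      -> sci_mode (max > 1e8), printed in e-notation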
def _scalar_str(self, formatter1, formatter2=None):
if formatter2 is not None:
real_str = _scalar_str(self.real, formatter1)
imag_str = (_scalar_str(self.imag, formatter2) + "j").lstrip()
# handles negative numbers, +0.0, -0.0
if imag_str[0] == "+" or imag_str[0] == "-":
return real_str + imag_str
else:
return real_str + "+" + imag_str
else:
return formatter1.format(self.item())
def _vector_str(self, indent, summarize, formatter1, formatter2=None):
# length includes spaces and comma between elements
element_length = formatter1.width() + 2
if formatter2 is not None:
# width for imag_formatter + an extra j for complex
element_length += formatter2.width() + 1
elements_per_line = max(
1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length)))
)
char_per_line = element_length * elements_per_line
def _val_formatter(val, formatter1=formatter1, formatter2=formatter2):
if formatter2 is not None:
real_str = formatter1.format(val.real)
imag_str = (formatter2.format(val.imag) + "j").lstrip()
# handles negative numbers, +0.0, -0.0
if imag_str[0] == "+" or imag_str[0] == "-":
return real_str + imag_str
else:
return real_str + "+" + imag_str
else:
return formatter1.format(val)
if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
data = (
[_val_formatter(val) for val in self[: PRINT_OPTS.edgeitems].tolist()]
+ [" ..."]
+ [_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems :].tolist()]
)
else:
data = [_val_formatter(val) for val in self.tolist()]
data_lines = [
data[i : i + elements_per_line] for i in range(0, len(data), elements_per_line)
]
lines = [", ".join(line) for line in data_lines]
return "[" + ("," + "\n" + " " * (indent + 1)).join(lines) + "]"
# formatter2 is only used for printing complex tensors.
# For complex tensors, formatter1 and formatter2 are the formatters for tensor.real
# and tensor.imag respectively
def _tensor_str_with_formatter(self, indent, summarize, formatter1, formatter2=None):
dim = self.dim()
if dim == 0:
return _scalar_str(self, formatter1, formatter2)
if dim == 1:
return _vector_str(self, indent, summarize, formatter1, formatter2)
if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
slices = (
[
_tensor_str_with_formatter(
self[i], indent + 1, summarize, formatter1, formatter2
)
for i in range(0, PRINT_OPTS.edgeitems)
]
+ ["..."]
+ [
_tensor_str_with_formatter(
self[i], indent + 1, summarize, formatter1, formatter2
)
for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))
]
)
else:
slices = [
_tensor_str_with_formatter(
self[i], indent + 1, summarize, formatter1, formatter2
)
for i in range(0, self.size(0))
]
tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices)
return "[" + tensor_str + "]"
def _tensor_str(self, indent):
if self.numel() == 0:
return "[]"
if self.has_names():
# There are two main codepaths (possibly more) that tensor printing goes through:
# - tensor data can fit comfortably on screen
# - tensor data needs to be summarized
# Some of the codepaths don't fully support named tensors, so we send in
# an unnamed tensor to the formatting code as a workaround.
self = self.rename(None)
summarize = self.numel() > PRINT_OPTS.threshold
if self._is_zerotensor():
self = self.clone()
# handle the negative bit
if self.is_neg():
self = self.resolve_neg()
if self.dtype is torch.float16 or self.dtype is torch.bfloat16:
self = self.float()
if self.dtype is torch.complex32:
self = self.cfloat()
if self.dtype.is_complex:
# handle the conjugate bit
self = self.resolve_conj()
real_formatter = _Formatter(
get_summarized_data(self.real) if summarize else self.real
)
imag_formatter = _Formatter(
get_summarized_data(self.imag) if summarize else self.imag
)
return _tensor_str_with_formatter(
self, indent, summarize, real_formatter, imag_formatter
)
else:
formatter = _Formatter(get_summarized_data(self) if summarize else self)
return _tensor_str_with_formatter(self, indent, summarize, formatter)
def _add_suffixes(tensor_str, suffixes, indent, force_newline):
tensor_strs = [tensor_str]
last_line_len = len(tensor_str) - tensor_str.rfind("\n") + 1
for suffix in suffixes:
suffix_len = len(suffix)
if force_newline or last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth:
tensor_strs.append(",\n" + " " * indent + suffix)
last_line_len = indent + suffix_len
force_newline = False
else:
tensor_strs.append(", " + suffix)
last_line_len += suffix_len + 2
tensor_strs.append(")")
return "".join(tensor_strs)
def get_summarized_data(self):
dim = self.dim()
if dim == 0:
return self
if dim == 1:
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
return torch.cat(
(self[: PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems :])
)
else:
return self
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
end = [self[i] for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))]
return torch.stack([get_summarized_data(x) for x in (start + end)])
else:
return torch.stack([get_summarized_data(x) for x in self])
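# A minimal sketch with the default edgeitems=3: a long 1-D tensor keeps only its
# first and last three elements before being handed to the formatter.
#
#   >>> get_summarized_data(torch.arange(10))
#   tensor([0, 1, 2, 7, 8, 9])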
def _str_intern(inp, *, tensor_contents=None):
is_plain_tensor = type(inp) is torch.Tensor or type(inp) is torch.nn.Parameter
if inp.is_nested:
prefix = "nested_tensor("
elif is_plain_tensor:
prefix = "tensor("
else:
prefix = f"{type(inp).__name__}("
indent = len(prefix)
suffixes = []
custom_contents_provided = tensor_contents is not None
if custom_contents_provided:
tensor_str = tensor_contents
# This is used to extract the primal value and thus disable the forward AD
# within this function.
# TODO(albanD) This needs to be updated when more than one level is supported
self, tangent = torch.autograd.forward_ad.unpack_dual(inp)
# Note [Print tensor device]:
# The general logic here is that we only print the device when it doesn't match
# the device specified by the default tensor type.
# Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus
# torch._C._get_default_device() only returns either cpu or cuda.
# In other cases, we don't have a way to set them as default yet,
# and we should always print out device for them.
if (
self.device.type != torch._C._get_default_device()
or (
self.device.type == "cuda"
and torch.cuda.current_device() != self.device.index
)
or (self.device.type == "mps")
):
suffixes.append("device='" + str(self.device) + "'")
# Tensor printing performs tensor operations like slicing and indexing to put it into a
# representable format. These operations on ipu/xla/lazy tensors result in compilations. Hence,
# to avoid compilations, copy the tensor to cpu before printing.
if self.device.type in ["xla", "lazy", "ipu"]:
self = self.to("cpu")
# TODO: add an API to map real -> complex dtypes
_default_complex_dtype = (
torch.cdouble if torch.get_default_dtype() == torch.double else torch.cfloat
)
has_default_dtype = self.dtype in (
torch.get_default_dtype(),
_default_complex_dtype,
torch.int64,
torch.bool,
)
if self.is_sparse:
suffixes.append("size=" + str(tuple(self.shape)))
from torch._subclasses.fake_tensor import FakeTensor
if not self.is_meta and not isinstance(self, FakeTensor):
suffixes.append("nnz=" + str(self._nnz()))
if not has_default_dtype:
suffixes.append("dtype=" + str(self.dtype))
if not custom_contents_provided:
indices_prefix = "indices=tensor("
indices = self._indices().detach()
indices_str = _tensor_str(indices, indent + len(indices_prefix))
if indices.numel() == 0:
indices_str += ", size=" + str(tuple(indices.shape))
values_prefix = "values=tensor("
values = self._values().detach()
values_str = _tensor_str(values, indent + len(values_prefix))
if values.numel() == 0:
values_str += ", size=" + str(tuple(values.shape))
tensor_str = (
indices_prefix
+ indices_str
+ "),\n"
+ " " * indent
+ values_prefix
+ values_str
+ ")"
)
elif self.layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
suffixes.append("size=" + str(tuple(self.shape)))
suffixes.append("nnz=" + str(self._nnz()))
if not has_default_dtype:
suffixes.append("dtype=" + str(self.dtype))
if not custom_contents_provided:
compressed_indices_method, plain_indices_method = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}[self.layout]
if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
cdimname, pdimname = "row", "column"
else:
cdimname, pdimname = "column", "row"
compressed_indices_prefix = f"c{cdimname[:3]}_indices=tensor("
compressed_indices = compressed_indices_method(self).detach()
compressed_indices_str = _tensor_str(
compressed_indices, indent + len(compressed_indices_prefix)
)
if compressed_indices.numel() == 0:
compressed_indices_str += ", size=" + str(
tuple(compressed_indices.shape)
)
plain_indices_prefix = f"{pdimname[:3]}_indices=tensor("
plain_indices = plain_indices_method(self).detach()
plain_indices_str = _tensor_str(
plain_indices, indent + len(plain_indices_prefix)
)
if plain_indices.numel() == 0:
plain_indices_str += ", size=" + str(tuple(plain_indices.shape))
values_prefix = "values=tensor("
values = self.values().detach()
values_str = _tensor_str(values, indent + len(values_prefix))
if values.numel() == 0:
values_str += ", size=" + str(tuple(values.shape))
tensor_str = (
compressed_indices_prefix
+ compressed_indices_str
+ "),\n"
+ " " * indent
+ plain_indices_prefix
+ plain_indices_str
+ "),\n"
+ " " * indent
+ values_prefix
+ values_str
+ ")"
)
elif self.is_quantized:
suffixes.append("size=" + str(tuple(self.shape)))
if not has_default_dtype:
suffixes.append("dtype=" + str(self.dtype))
suffixes.append("quantization_scheme=" + str(self.qscheme()))
if (
self.qscheme() == torch.per_tensor_affine
or self.qscheme() == torch.per_tensor_symmetric
):
suffixes.append("scale=" + str(self.q_scale()))
suffixes.append("zero_point=" + str(self.q_zero_point()))
elif (
self.qscheme() == torch.per_channel_affine
or self.qscheme() == torch.per_channel_symmetric
or self.qscheme() == torch.per_channel_affine_float_qparams
):
suffixes.append("scale=" + str(self.q_per_channel_scales()))
suffixes.append("zero_point=" + str(self.q_per_channel_zero_points()))
suffixes.append("axis=" + str(self.q_per_channel_axis()))
if not custom_contents_provided:
tensor_str = _tensor_str(self.dequantize(), indent)
elif self.is_nested:
if not custom_contents_provided:
def indented_str(s, indent):
return "\n".join(f" {line}" for line in s.split("\n"))
strs = ",\n".join(
indented_str(str(t), indent + 1)
for t in torch.ops.aten.unbind.int(self, 0)
)
tensor_str = f"[\n{strs}\n]"
elif torch._is_functional_tensor(self):
prefix = "_to_functional_tensor("
tensor_str = repr(torch._from_functional_tensor(self))
else:
if self.is_meta:
suffixes.append("size=" + str(tuple(self.shape)))
if self.dtype != torch.get_default_dtype():
suffixes.append("dtype=" + str(self.dtype))
# TODO: This implies that ellipses is valid syntax for allocating
# a meta tensor, which it could be, but it isn't right now
if not custom_contents_provided:
tensor_str = "..."
else:
if self.numel() == 0 and not self.is_sparse:
# Explicitly print the shape if it is not (0,), to match NumPy behavior
if self.dim() != 1:
suffixes.append("size=" + str(tuple(self.shape)))
# In an empty tensor, there are no elements to infer if the dtype
# should be int64, so it must be shown explicitly.
if self.dtype != torch.get_default_dtype():
suffixes.append("dtype=" + str(self.dtype))
if not custom_contents_provided:
tensor_str = "[]"
else:
if not has_default_dtype:
suffixes.append("dtype=" + str(self.dtype))
if not custom_contents_provided:
if self.layout != torch.strided:
tensor_str = _tensor_str(self.to_dense(), indent)
else:
tensor_str = _tensor_str(self, indent)
if self.layout != torch.strided:
suffixes.append("layout=" + str(self.layout))
# Use inp here to get the original grad_fn and not the one generated by the forward grad
# unpacking.
if inp.grad_fn is not None:
name = type(inp.grad_fn).__name__
if name == "CppFunction":
name = inp.grad_fn.name().rsplit("::", 1)[-1]
suffixes.append("grad_fn=<{}>".format(name))
elif inp.requires_grad:
suffixes.append("requires_grad=True")
if self.has_names():
suffixes.append("names={}".format(self.names))
if tangent is not None:
suffixes.append("tangent={}".format(tangent))
string_repr = _add_suffixes(
prefix + tensor_str, suffixes, indent, force_newline=self.is_sparse
)
# Check if this instance is flagged as a parameter and change the repr accordingly.
# Unfortunately, this function has to be aware of this detail.
# NB: This is currently skipped for plain tensor parameters to maintain BC. In the future,
# this should be done for those as well to produce a valid repr.
if isinstance(self, torch.nn.Parameter) and not is_plain_tensor:
string_repr = f"Parameter({string_repr})"
return string_repr
def _str(self, *, tensor_contents=None):
with torch.no_grad():
return _str_intern(self, tensor_contents=tensor_contents)
|
pytorch-master
|
torch/_tensor_str.py
|
import ast
import functools
import inspect
from textwrap import dedent
from typing import Any, List, NamedTuple, Optional, Tuple
from torch._C import ErrorReport
from torch._C._jit_tree_views import SourceRangeFactory
def get_source_lines_and_file(
obj: Any,
error_msg: Optional[str] = None,
) -> Tuple[List[str], int, Optional[str]]:
"""
Wrapper around inspect.getsourcelines and inspect.getsourcefile.
Returns: (sourcelines, file_lineno, filename)
"""
filename = None # in case getsourcefile throws
try:
filename = inspect.getsourcefile(obj)
sourcelines, file_lineno = inspect.getsourcelines(obj)
except OSError as e:
msg = (
f"Can't get source for {obj}. TorchScript requires source access in "
"order to carry out compilation, make sure original .py files are "
"available."
)
if error_msg:
msg += "\n" + error_msg
raise OSError(msg) from e
return sourcelines, file_lineno, filename
def normalize_source_lines(sourcelines: List[str]) -> List[str]:
"""
This helper function accepts a list of source lines. It finds the
indentation level of the function definition (`def`), then it indents
all lines in the function body to a point at or greater than that
level. This allows for comments and continued string literals that
are at a lower indentation than the rest of the code.
Args:
sourcelines: function source code, separated into lines by
the '\n' character
Returns:
A list of source lines that have been correctly aligned
"""
def remove_prefix(text, prefix):
return text[text.startswith(prefix) and len(prefix) :]
# Find the line and line number containing the function definition
idx = None
for i, l in enumerate(sourcelines):
if l.lstrip().startswith("def"):
idx = i
break
# This will happen when the function is a lambda; we won't find "def" anywhere in the source
# lines in that case. Currently trying to JIT compile a lambda will throw an error up in
# `parse_def()`, but we might want to handle this case in the future.
if idx is None:
return sourcelines
# Get a string representing the amount of leading whitespace
fn_def = sourcelines[idx]
whitespace = fn_def.split("def")[0]
# Add this leading whitespace to all lines before and after the `def`
aligned_prefix = [
whitespace + remove_prefix(s, whitespace) for s in sourcelines[:idx]
]
aligned_suffix = [
whitespace + remove_prefix(s, whitespace) for s in sourcelines[idx + 1 :]
]
# Put it together again
aligned_prefix.append(fn_def)
return aligned_prefix + aligned_suffix
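# A minimal sketch, assuming a hypothetical method whose body contains a
# flush-left comment; the comment is re-indented to the `def` level so that
# dedent + ast.parse succeed.
#
#   before: ['    def f(x):\n', '# stray comment\n', '        return x\n']
#   after:  ['    def f(x):\n', '    # stray comment\n', '        return x\n']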
# Thin wrapper around SourceRangeFactory to store extra metadata
# about the function-to-be-compiled.
class SourceContext(SourceRangeFactory):
def __init__(
self,
source,
filename,
file_lineno,
leading_whitespace_len,
uses_true_division=True,
funcname=None,
):
super(SourceContext, self).__init__(
source, filename, file_lineno, leading_whitespace_len
)
self.uses_true_division = uses_true_division
self.filename = filename
self.funcname = funcname
@functools.lru_cache(maxsize=None)
def make_source_context(*args):
return SourceContext(*args)
def fake_range():
return SourceContext("", None, 0, 0).make_raw_range(0, 1)
class ParsedDef(NamedTuple):
ast: ast.Module
ctx: SourceContext
source: str
filename: Optional[str]
file_lineno: int
def parse_def(fn):
sourcelines, file_lineno, filename = get_source_lines_and_file(
fn, ErrorReport.call_stack()
)
sourcelines = normalize_source_lines(sourcelines)
source = "".join(sourcelines)
dedent_src = dedent(source)
py_ast = ast.parse(dedent_src)
if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef):
raise RuntimeError(
f"Expected a single top-level function: {filename}:{file_lineno}"
)
leading_whitespace_len = len(source.split("\n", 1)[0]) - len(
dedent_src.split("\n", 1)[0]
)
ctx = make_source_context(
source, filename, file_lineno, leading_whitespace_len, True, fn.__name__
)
return ParsedDef(py_ast, ctx, source, filename, file_lineno)
|
pytorch-master
|
torch/_sources.py
|
import functools
import warnings
from typing import Any, Callable, List, Optional, Tuple, Union
import torch
from torch import Tensor
from torch.utils._pytree import _broadcast_to_and_flatten, tree_flatten, tree_unflatten
in_dims_t = Union[int, Tuple]
out_dims_t = Union[int, Tuple[int, ...]]
# Checks that all args-to-be-batched have the same batch dim size
def _validate_and_get_batch_size(
flat_in_dims: List[Optional[int]], flat_args: List
) -> int:
batch_sizes = [
arg.size(in_dim)
for in_dim, arg in zip(flat_in_dims, flat_args)
if in_dim is not None
]
if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes):
raise ValueError(
f"vmap: Expected all tensors to have the same size in the mapped "
f"dimension, got sizes {batch_sizes} for the mapped dimension"
)
return batch_sizes[0]
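# A minimal sketch: every mapped argument must agree on the size of its mapped
# dimension, while None entries are ignored.
#
#   flat_in_dims=[0, None, 1], shapes [(3, 4), (4,), (5, 3)]  -> batch size 3
#   flat_in_dims=[0, 0],       shapes [(3, 4), (2, 4)]        -> ValueError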
def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int:
if isinstance(batched_outputs, tuple):
return len(batched_outputs)
return 1
# If value is a tuple, check it has length `num_elements`.
# If value is not a tuple, make a tuple with `value` repeated `num_elements` times
def _as_tuple(
value: Any, num_elements: int, error_message_lambda: Callable[[], str]
) -> Tuple:
if not isinstance(value, tuple):
return (value,) * num_elements
if len(value) != num_elements:
raise ValueError(error_message_lambda())
return value
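# A minimal sketch of the tuple normalization, with a placeholder error lambda:
#
#   _as_tuple(0, 3, lambda: "bad out_dims")         -> (0, 0, 0)
#   _as_tuple((0, 1, 2), 3, lambda: "bad out_dims") -> (0, 1, 2)
#   _as_tuple((0, 1), 3, lambda: "bad out_dims")    -> ValueError("bad out_dims")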
# Creates BatchedTensors for every Tensor in arg that should be batched.
# Returns the (potentially) batched arguments and the batch_size.
def _create_batched_inputs(
in_dims: in_dims_t, args: Tuple, vmap_level: int, func: Callable
) -> Tuple[Tuple, int]:
if not isinstance(in_dims, int) and not isinstance(in_dims, tuple):
raise ValueError(
f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
f"expected `in_dims` to be int or a (potentially nested) tuple "
f"matching the structure of inputs, got: {type(in_dims)}."
)
if len(args) == 0:
raise ValueError(
f"vmap({_get_name(func)})(<inputs>): got no inputs. Maybe you forgot to add "
f"inputs, or you are trying to vmap over a function with no inputs. "
f"The latter is unsupported."
)
flat_args, args_spec = tree_flatten(args)
flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec)
if flat_in_dims is None:
raise ValueError(
f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
f"in_dims is not compatible with the structure of `inputs`. "
f"in_dims has structure {tree_flatten(in_dims)[1]} but inputs "
f"has structure {args_spec}."
)
for arg, in_dim in zip(flat_args, flat_in_dims):
if not isinstance(in_dim, int) and in_dim is not None:
raise ValueError(
f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
f"Got in_dim={in_dim} for an input but in_dim must be either "
f"an integer dimension or None."
)
if isinstance(in_dim, int) and not isinstance(arg, Tensor):
raise ValueError(
f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
f"Got in_dim={in_dim} for an input but the input is of type "
f"{type(arg)}. We cannot vmap over non-Tensor arguments, "
f"please use None as the respective in_dim"
)
if in_dim is not None and (in_dim < 0 or in_dim >= arg.dim()):
raise ValueError(
f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
f"Got in_dim={in_dim} for some input, but that input is a Tensor "
f"of dimensionality {arg.dim()} so expected in_dim to satisfy "
f"0 <= in_dim < {arg.dim()}."
)
batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args)
# See NOTE [Ignored _remove_batch_dim, _add_batch_dim]
batched_inputs = [
arg if in_dim is None else torch._add_batch_dim(arg, in_dim, vmap_level)
for in_dim, arg in zip(flat_in_dims, flat_args)
]
return tree_unflatten(batched_inputs, args_spec), batch_size
# Undoes the batching (and any batch dimensions) associated with the `vmap_level`.
def _unwrap_batched(
batched_outputs: Union[Tensor, Tuple[Tensor, ...]],
out_dims: out_dims_t,
vmap_level: int,
batch_size: int,
func: Callable,
allow_none_pass_through: bool = False,
) -> Tuple:
num_outputs = _num_outputs(batched_outputs)
out_dims_as_tuple = _as_tuple(
out_dims,
num_outputs,
lambda: f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must "
f"have one dim per output (got {num_outputs} outputs) of {_get_name(func)}.",
)
# NOTE [Ignored _remove_batch_dim, _add_batch_dim]
# There is something wrong with our type bindings for functions that begin
# with '_', see #40397.
if isinstance(batched_outputs, Tensor):
out_dim = out_dims_as_tuple[0]
return torch._remove_batch_dim(batched_outputs, vmap_level, batch_size, out_dim) # type: ignore[return-value]
if allow_none_pass_through:
return tuple(
(
torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
if out is not None
else None
)
for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
)
else:
return tuple(
torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
)
# Checks that `fn` returned one or more Tensors and nothing else.
# NB: A python function that return multiple arguments returns a single tuple,
# so we are effectively checking that `outputs` is a single Tensor or a tuple of
# Tensors.
def _validate_outputs(outputs: Any, func: Callable) -> None:
if isinstance(outputs, Tensor):
return
if not isinstance(outputs, tuple):
raise ValueError(
f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
f"Tensors, got type {type(outputs)} as the return."
)
for idx, output in enumerate(outputs):
if isinstance(output, Tensor):
continue
raise ValueError(
f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
f"Tensors, got type {type(output)} for return {idx}."
)
def _check_out_dims_is_int_or_int_tuple(out_dims: out_dims_t, func: Callable) -> None:
if isinstance(out_dims, int):
return
if not isinstance(out_dims, tuple) or not all(
[isinstance(out_dim, int) for out_dim in out_dims]
):
raise ValueError(
f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be "
f"an int or a tuple of int representing where in the outputs the "
f"vmapped dimension should appear."
)
def _get_name(func: Callable):
if hasattr(func, "__name__"):
return func.__name__
# Not all callables have __name__, in fact, only static functions/methods do.
# A callable created via functools.partial or an nn.Module, to name some
# examples, doesn't have a __name__.
return repr(func)
# vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors,
# sends those into func, and then unwraps the output BatchedTensors. Operations
# on BatchedTensors perform the batched operations that the user is asking for.
def vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0) -> Callable:
"""
vmap is the vectorizing map. Returns a new function that maps `func` over some
dimension of the inputs. Semantically, vmap pushes the map into PyTorch
operations called by `func`, effectively vectorizing those operations.
vmap is useful for handling batch dimensions: one can write a function `func`
that runs on examples and then lift it to a function that can take batches of
examples with `vmap(func)`. vmap can also be used to compute batched
gradients when composed with autograd.
.. note::
We have moved development of vmap to
`functorch. <https://github.com/pytorch/functorch>`_ functorch's
vmap is able to arbitrarily compose with gradient computation
and contains significant performance improvements.
Please give that a try if that is what you're looking for.
Furthermore, if you're interested in using vmap for your use case,
please `contact us! <https://github.com/pytorch/pytorch/issues/42368>`_
We're interested in gathering feedback from early adopters to inform
the design.
.. warning::
torch.vmap is an experimental prototype that is subject to
change and/or deletion. Please use at your own risk.
Args:
func (function): A Python function that takes one or more arguments.
Must return one or more Tensors.
in_dims (int or nested structure): Specifies which dimension of the
inputs should be mapped over. `in_dims` should have a structure
like the inputs. If the `in_dim` for a particular input is None,
then that indicates there is no map dimension. Default: 0.
out_dims (int or Tuple[int]): Specifies where the mapped dimension
should appear in the outputs. If `out_dims` is a Tuple, then it should
have one element per output. Default: 0.
Returns:
Returns a new "batched" function. It takes the same inputs as `func`,
except each input has an extra dimension at the index specified by `in_dims`.
It returns the same outputs as `func`, except each output has
an extra dimension at the index specified by `out_dims`.
.. warning:
vmap works best with functional-style code. Please do not perform any
side-effects in `func`, with the exception of in-place PyTorch operations.
Examples of side-effects include mutating Python data structures and
assigning values to variables not captured in `func`.
One example of using `vmap` is to compute batched dot products. PyTorch
doesn't provide a batched `torch.dot` API; instead of unsuccessfully
rummaging through docs, use `vmap` to construct a new function.
>>> torch.dot # [D], [D] -> []
>>> batched_dot = torch.vmap(torch.dot) # [N, D], [N, D] -> [N]
>>> x, y = torch.randn(2, 5), torch.randn(2, 5)
>>> batched_dot(x, y)
`vmap` can be helpful in hiding batch dimensions, leading to a simpler
model authoring experience.
>>> batch_size, feature_size = 3, 5
>>> weights = torch.randn(feature_size, requires_grad=True)
>>>
>>> def model(feature_vec):
>>> # Very simple linear model with activation
>>> return feature_vec.dot(weights).relu()
>>>
>>> examples = torch.randn(batch_size, feature_size)
>>> result = torch.vmap(model)(examples)
`vmap` can also help vectorize computations that were previously difficult
or impossible to batch. One example is higher-order gradient computation.
The PyTorch autograd engine computes vjps (vector-Jacobian products).
Computing a full Jacobian matrix for some function f: R^N -> R^N usually
requires N calls to `autograd.grad`, one per Jacobian row. Using `vmap`,
we can vectorize the whole computation, computing the Jacobian in a single
call to `autograd.grad`.
>>> # Setup
>>> N = 5
>>> f = lambda x: x ** 2
>>> x = torch.randn(N, requires_grad=True)
>>> y = f(x)
>>> I_N = torch.eye(N)
>>>
>>> # Sequential approach
>>> jacobian_rows = [torch.autograd.grad(y, x, v, retain_graph=True)[0]
>>> for v in I_N.unbind()]
>>> jacobian = torch.stack(jacobian_rows)
>>>
>>> # vectorized gradient computation
>>> def get_vjp(v):
>>> return torch.autograd.grad(y, x, v)
>>> jacobian = torch.vmap(get_vjp)(I_N)
.. note::
vmap does not provide general autobatching or handle variable-length
sequences out of the box.
"""
warnings.warn(
"Please use functorch.vmap instead of torch.vmap "
"(https://github.com/pytorch/functorch). "
"We've moved development on torch.vmap over to functorch; "
"functorch's vmap has a multitude of significant performance and "
"functionality improvements.",
stacklevel=2,
)
return _vmap(func, in_dims, out_dims)
# A version of vmap but without the initial "experimental prototype" warning
def _vmap(
func: Callable,
in_dims: in_dims_t = 0,
out_dims: out_dims_t = 0,
allow_none_pass_through: bool = False,
) -> Callable:
# The `allow_none_pass_through` argument is a temporary workaround and may be removed.
# Currently it enables us to wrap calls into the autograd engine (`autograd.grad`),
# which may return None if any of the inputs are unused. See the issue discussing this:
# https://github.com/facebookresearch/functorch/issues/159.
@functools.wraps(func)
def wrapped(*args):
_check_out_dims_is_int_or_int_tuple(out_dims, func)
vmap_level = torch._C._vmapmode_increment_nesting()
try:
batched_inputs, batch_size = _create_batched_inputs(
in_dims, args, vmap_level, func
)
batched_outputs = func(*batched_inputs)
if not allow_none_pass_through:
_validate_outputs(batched_outputs, func)
return _unwrap_batched(
batched_outputs,
out_dims,
vmap_level,
batch_size,
func,
allow_none_pass_through=allow_none_pass_through,
)
finally:
torch._C._vmapmode_decrement_nesting()
return wrapped
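# A minimal usage sketch of the internal, warning-free variant; torch.vmap simply
# warns and then delegates here.
#
#   >>> batched_dot = _vmap(torch.dot)   # [N, D], [N, D] -> [N]
#   >>> batched_dot(torch.randn(3, 5), torch.randn(3, 5)).shape
#   torch.Size([3])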
|
pytorch-master
|
torch/_vmap_internals.py
|
from typing import (
List, Tuple, Optional, Union, Any, Sequence, TYPE_CHECKING
)
import torch
from torch._C import _add_docstr
import torch.nn.functional as F
from ._lowrank import svd_lowrank, pca_lowrank
from .overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
from ._jit_internal import boolean_dispatch
from ._jit_internal import _overload as overload
Tensor = torch.Tensor
from torch import _VF
__all__ = [
'atleast_1d',
'atleast_2d',
'atleast_3d',
'align_tensors',
'broadcast_shapes',
'broadcast_tensors',
'cartesian_prod',
'block_diag',
'cdist',
'chain_matmul',
'einsum',
'istft',
'lu',
'norm',
'meshgrid',
'pca_lowrank',
'split',
'stft',
'svd_lowrank',
'tensordot',
'unique',
'unique_consecutive',
]
def broadcast_tensors(*tensors):
r"""broadcast_tensors(*tensors) -> List of Tensors
Broadcasts the given tensors according to :ref:`broadcasting-semantics`.
Args:
*tensors: any number of tensors of the same type
.. warning::
More than one element of a broadcasted tensor may refer to a single
memory location. As a result, in-place operations (especially ones that
are vectorized) may result in incorrect behavior. If you need to write
to the tensors, please clone them first.
Example::
>>> x = torch.arange(3).view(1, 3)
>>> y = torch.arange(2).view(2, 1)
>>> a, b = torch.broadcast_tensors(x, y)
>>> a.size()
torch.Size([2, 3])
>>> a
tensor([[0, 1, 2],
[0, 1, 2]])
"""
# This wrapper exists to support variadic args.
if has_torch_function(tensors):
return handle_torch_function(broadcast_tensors, tensors, *tensors)
return _VF.broadcast_tensors(tensors) # type: ignore[attr-defined]
def broadcast_shapes(*shapes):
r"""broadcast_shapes(*shapes) -> Size
Similar to :func:`broadcast_tensors` but for shapes.
This is equivalent to
``torch.broadcast_tensors(*map(torch.empty, shapes))[0].shape``
but avoids the need to create intermediate tensors. This is useful for
broadcasting tensors of common batch shape but different rightmost shape,
e.g. to broadcast mean vectors with covariance matrices.
Example::
>>> torch.broadcast_shapes((2,), (3, 1), (1, 1, 1))
torch.Size([1, 3, 2])
Args:
\*shapes (torch.Size): Shapes of tensors.
Returns:
shape (torch.Size): A shape compatible with all input shapes.
Raises:
RuntimeError: If shapes are incompatible.
"""
# This wrapper exists to support variadic args.
# TODO Move this to C++ once the jit has better support for torch.Size.
if not torch.jit.is_tracing():
max_len = 0
for shape in shapes:
if isinstance(shape, int):
if max_len < 1:
max_len = 1
elif isinstance(shape, tuple) or isinstance(shape, list):
s = len(shape)
if max_len < s:
max_len = s
result = [1] * max_len
for shape in shapes:
if isinstance(shape, int):
shape = (shape,)
if isinstance(shape, tuple) or isinstance(shape, list):
for i in range(-1, -1 - len(shape), -1):
if shape[i] < 0:
raise RuntimeError("Trying to create tensor with negative dimension ({}): ({})"
.format(shape[i], shape[i]))
if shape[i] == 1 or shape[i] == result[i]:
continue
if result[i] != 1:
raise RuntimeError("Shape mismatch: objects cannot be broadcast to a single shape")
result[i] = shape[i]
else:
raise RuntimeError("Input shapes should be of type ints, a tuple of ints, or a list of ints, got ", shape)
return torch.Size(result)
else:
# with implementation above, torch.jit.trace hardcodes the sizes which makes subsequent replays fail
with torch.no_grad():
scalar = torch.zeros((), device="cpu")
tensors = [scalar.expand(shape) for shape in shapes]
tensors = broadcast_tensors(*tensors)
return tensors[0].shape
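# Illustrative sketch of the "mean vectors with covariance matrices" use case from
# the docstring (`_broadcast_shapes_demo` is a hypothetical helper, not public API).
def _broadcast_shapes_demo():
    means_batch = (4, 1)   # batch dims of a (4, 1, 3) tensor of mean vectors
    covs_batch = (5,)      # batch dims of a (5, 3, 3) tensor of covariance matrices
    # Combined batch shape, computed without materializing intermediate tensors.
    assert torch.broadcast_shapes(means_batch, covs_batch) == torch.Size([4, 5])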
def split(
tensor: Tensor, split_size_or_sections: Union[int, List[int]], dim: int = 0
) -> List[Tensor]:
r"""Splits the tensor into chunks. Each chunk is a view of the original tensor.
If :attr:`split_size_or_sections` is an integer type, then :attr:`tensor` will
be split into equally sized chunks (if possible). Last chunk will be smaller if
the tensor size along the given dimension :attr:`dim` is not divisible by
:attr:`split_size`.
If :attr:`split_size_or_sections` is a list, then :attr:`tensor` will be split
into ``len(split_size_or_sections)`` chunks with sizes in :attr:`dim` according
to :attr:`split_size_or_sections`.
Args:
tensor (Tensor): tensor to split.
split_size_or_sections (int) or (list(int)): size of a single chunk or
list of sizes for each chunk
dim (int): dimension along which to split the tensor.
Example::
>>> a = torch.arange(10).reshape(5,2)
>>> a
tensor([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> torch.split(a, 2)
(tensor([[0, 1],
[2, 3]]),
tensor([[4, 5],
[6, 7]]),
tensor([[8, 9]]))
>>> torch.split(a, [1,4])
(tensor([[0, 1]]),
tensor([[2, 3],
[4, 5],
[6, 7],
[8, 9]]))
"""
if has_torch_function_unary(tensor):
return handle_torch_function(
split, (tensor,), tensor, split_size_or_sections, dim=dim)
# Overwriting reason:
# This dispatches to two ATen functions depending on the type of
# split_size_or_sections. The branching code is in _tensor.py, which we
# call here.
return tensor.split(split_size_or_sections, dim)
def einsum(*args: Any) -> Tensor:
r"""einsum(equation, *operands) -> Tensor
Sums the product of the elements of the input :attr:`operands` along dimensions specified using a notation
based on the Einstein summation convention.
Einsum allows computing many common multi-dimensional linear algebraic array operations by representing them
in a short-hand format based on the Einstein summation convention, given by :attr:`equation`. The details of
this format are described below, but the general idea is to label every dimension of the input :attr:`operands`
with some subscript and define which subscripts are part of the output. The output is then computed by summing
the product of the elements of the :attr:`operands` along the dimensions whose subscripts are not part of the
output. For example, matrix multiplication can be computed using einsum as `torch.einsum("ij,jk->ik", A, B)`.
Here, j is the summation subscript and i and k the output subscripts (see section below for more details on why).
Equation:
The :attr:`equation` string specifies the subscripts (letters in `[a-zA-Z]`) for each dimension of
        the input :attr:`operands` in the same order as the dimensions, separating subscripts for each operand by a
comma (','), e.g. `'ij,jk'` specify subscripts for two 2D operands. The dimensions labeled with the same subscript
must be broadcastable, that is, their size must either match or be `1`. The exception is if a subscript is
repeated for the same input operand, in which case the dimensions labeled with this subscript for this operand
must match in size and the operand will be replaced by its diagonal along these dimensions. The subscripts that
appear exactly once in the :attr:`equation` will be part of the output, sorted in increasing alphabetical order.
The output is computed by multiplying the input :attr:`operands` element-wise, with their dimensions aligned based
on the subscripts, and then summing out the dimensions whose subscripts are not part of the output.
Optionally, the output subscripts can be explicitly defined by adding an arrow ('->') at the end of the equation
followed by the subscripts for the output. For instance, the following equation computes the transpose of a
matrix multiplication: 'ij,jk->ki'. The output subscripts must appear at least once for some input operand and
at most once for the output.
Ellipsis ('...') can be used in place of subscripts to broadcast the dimensions covered by the ellipsis.
Each input operand may contain at most one ellipsis which will cover the dimensions not covered by subscripts,
        e.g. for an input operand with 5 dimensions, the ellipsis in the equation `'ab...c'` covers the third and fourth
dimensions. The ellipsis does not need to cover the same number of dimensions across the :attr:`operands` but the
'shape' of the ellipsis (the size of the dimensions covered by them) must broadcast together. If the output is not
explicitly defined with the arrow ('->') notation, the ellipsis will come first in the output (left-most dimensions),
before the subscript labels that appear exactly once for the input operands. e.g. the following equation implements
batch matrix multiplication `'...ij,...jk'`.
A few final notes: the equation may contain whitespaces between the different elements (subscripts, ellipsis,
arrow and comma) but something like `'. . .'` is not valid. An empty string `''` is valid for scalar operands.
.. note::
``torch.einsum`` handles ellipsis ('...') differently from NumPy in that it allows dimensions
covered by the ellipsis to be summed over, that is, ellipsis are not required to be part of the output.
.. note::
This function does not optimize the given expression, so a different formula for the same computation may
run faster or consume less memory. Projects like opt_einsum (https://optimized-einsum.readthedocs.io/en/stable/)
can optimize the formula for you.
.. note::
As of PyTorch 1.10 :func:`torch.einsum` also supports the sublist format (see examples below). In this format,
        subscripts for each operand are specified by sublists, lists of integers in the range [0, 52). These sublists
        follow their operands, and an extra sublist can appear at the end of the input to specify the output's
        subscripts, e.g. `torch.einsum(op1, sublist1, op2, sublist2, ..., [sublist_out])`. Python's `Ellipsis` object
may be provided in a sublist to enable broadcasting as described in the Equation section above.
Args:
equation (str): The subscripts for the Einstein summation.
operands (List[Tensor]): The tensors to compute the Einstein summation of.
Examples::
>>> # trace
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> torch.einsum('ii', torch.randn(4, 4))
tensor(-1.2104)
>>> # diagonal
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> torch.einsum('ii->i', torch.randn(4, 4))
tensor([-0.1034, 0.7952, -0.2433, 0.4545])
>>> # outer product
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> x = torch.randn(5)
>>> y = torch.randn(4)
>>> torch.einsum('i,j->ij', x, y)
tensor([[ 0.1156, -0.2897, -0.3918, 0.4963],
[-0.3744, 0.9381, 1.2685, -1.6070],
[ 0.7208, -1.8058, -2.4419, 3.0936],
[ 0.1713, -0.4291, -0.5802, 0.7350],
[ 0.5704, -1.4290, -1.9323, 2.4480]])
>>> # batch matrix multiplication
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> As = torch.randn(3,2,5)
>>> Bs = torch.randn(3,5,4)
>>> torch.einsum('bij,bjk->bik', As, Bs)
tensor([[[-1.0564, -1.5904, 3.2023, 3.1271],
[-1.6706, -0.8097, -0.8025, -2.1183]],
[[ 4.2239, 0.3107, -0.5756, -0.2354],
[-1.4558, -0.3460, 1.5087, -0.8530]],
[[ 2.8153, 1.8787, -4.3839, -1.2112],
[ 0.3728, -2.1131, 0.0921, 0.8305]]])
>>> # with sublist format and ellipsis
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> torch.einsum(As, [..., 0, 1], Bs, [..., 1, 2], [..., 0, 2])
tensor([[[-1.0564, -1.5904, 3.2023, 3.1271],
[-1.6706, -0.8097, -0.8025, -2.1183]],
[[ 4.2239, 0.3107, -0.5756, -0.2354],
[-1.4558, -0.3460, 1.5087, -0.8530]],
[[ 2.8153, 1.8787, -4.3839, -1.2112],
[ 0.3728, -2.1131, 0.0921, 0.8305]]])
>>> # batch permute
>>> A = torch.randn(2, 3, 4, 5)
>>> torch.einsum('...ij->...ji', A).shape
torch.Size([2, 3, 5, 4])
>>> # equivalent to torch.nn.functional.bilinear
>>> A = torch.randn(3,5,4)
>>> l = torch.randn(2,5)
>>> r = torch.randn(2,4)
>>> torch.einsum('bn,anm,bm->ba', l, A, r)
tensor([[-0.3430, -5.2405, 0.4494],
[ 0.3311, 5.5201, -3.0356]])
"""
# This wrapper exists to support variadic args.
if len(args) < 2:
raise ValueError('einsum(): must specify the equation string and at least one operand, '
'or at least one operand and its subscripts list')
equation = None
operands = None
if isinstance(args[0], torch.Tensor):
# Convert the subscript list format which is an interleaving of operand and its subscripts
# list with an optional output subscripts list at the end (see documentation for more details on this)
# to the equation string format by creating the equation string from the subscripts list and grouping the
# input operands into a tensorlist (List[Tensor]).
def parse_subscript(n: int) -> str:
if n == Ellipsis:
return '...'
if n >= 0 and n < 26:
return chr(ord('A') + n)
if n >= 26 and n < 52:
return chr(ord('a') + n - 26)
raise ValueError('einsum(): subscript in subscript list is not within the valid range [0, 52)')
# Parse subscripts for input operands
equation = ','.join(''.join(parse_subscript(s) for s in l) for l in args[1::2])
# Parse optional output subscripts (provided when the number of arguments is odd)
if len(args) % 2 == 1:
equation += '->' + ''.join(parse_subscript(s) for s in args[-1])
operands = args[:-1:2]
else:
operands = args[::2]
else:
equation = args[0]
operands = args[1:]
if has_torch_function(operands):
return handle_torch_function(einsum, operands, equation, *operands)
if len(operands) == 1 and isinstance(operands[0], (list, tuple)):
# the old interface of passing the operands as one list argument
_operands = operands[0]
        # recurse in case operands contains a value that has a torch function
# in the original implementation this line is omitted
return einsum(equation, *_operands)
return _VF.einsum(equation, operands) # type: ignore[attr-defined]
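# Illustrative sketch of the sublist format described in the docstring
# (`_einsum_sublist_demo` is a hypothetical helper, not public API): integer
# sublists plus `Ellipsis` are equivalent to the string equation form.
def _einsum_sublist_demo():
    A = torch.randn(3, 2, 5)
    B = torch.randn(3, 5, 4)
    out_sublist = torch.einsum(A, [..., 0, 1], B, [..., 1, 2], [..., 0, 2])
    out_equation = torch.einsum('...ij,...jk->...ik', A, B)
    assert torch.allclose(out_sublist, out_equation)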
# This wrapper exists to support variadic args.
if TYPE_CHECKING:
# The JIT doesn't understand Union, so only add type annotation for mypy
def meshgrid(*tensors: Union[Tensor, List[Tensor]],
indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
return _meshgrid(*tensors, indexing=indexing)
else:
def meshgrid(*tensors, indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
r"""Creates grids of coordinates specified by the 1D inputs in `attr`:tensors.
This is helpful when you want to visualize data over some
range of inputs. See below for a plotting example.
Given :math:`N` 1D tensors :math:`T_0 \ldots T_{N-1}` as
inputs with corresponding sizes :math:`S_0 \ldots S_{N-1}`,
this creates :math:`N` N-dimensional tensors :math:`G_0 \ldots
G_{N-1}`, each with shape :math:`(S_0, ..., S_{N-1})` where
the output :math:`G_i` is constructed by expanding :math:`T_i`
to the result shape.
.. note::
0D inputs are treated equivalently to 1D inputs of a
single element.
.. warning::
`torch.meshgrid(*tensors)` currently has the same behavior
as calling `numpy.meshgrid(*arrays, indexing='ij')`.
In the future `torch.meshgrid` will transition to
`indexing='xy'` as the default.
https://github.com/pytorch/pytorch/issues/50276 tracks
this issue with the goal of migrating to NumPy's behavior.
.. seealso::
:func:`torch.cartesian_prod` has the same effect but it
collects the data in a tensor of vectors.
Args:
tensors (list of Tensor): list of scalars or 1 dimensional tensors. Scalars will be
treated as tensors of size :math:`(1,)` automatically
indexing: (str, optional): the indexing mode, either "xy"
or "ij", defaults to "ij". See warning for future changes.
If "xy" is selected, the first dimension corresponds
to the cardinality of the second input and the second
dimension corresponds to the cardinality of the first
input.
If "ij" is selected, the dimensions are in the same
order as the cardinality of the inputs.
Returns:
seq (sequence of Tensors): If the input has :math:`N`
            tensors of size :math:`S_0 \ldots S_{N-1}`, then the
output will also have :math:`N` tensors, where each tensor
is of shape :math:`(S_0, ..., S_{N-1})`.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> y = torch.tensor([4, 5, 6])
Observe the element-wise pairings across the grid, (1, 4),
(1, 5), ..., (3, 6). This is the same thing as the
cartesian product.
>>> grid_x, grid_y = torch.meshgrid(x, y, indexing='ij')
>>> grid_x
tensor([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> grid_y
tensor([[4, 5, 6],
[4, 5, 6],
[4, 5, 6]])
This correspondence can be seen when these grids are
stacked properly.
>>> torch.equal(torch.cat(tuple(torch.dstack([grid_x, grid_y]))),
... torch.cartesian_prod(x, y))
True
`torch.meshgrid` is commonly used to produce a grid for
plotting.
>>> import matplotlib.pyplot as plt
>>> xs = torch.linspace(-5, 5, steps=100)
>>> ys = torch.linspace(-5, 5, steps=100)
>>> x, y = torch.meshgrid(xs, ys, indexing='xy')
>>> z = torch.sin(torch.sqrt(x * x + y * y))
>>> ax = plt.axes(projection='3d')
>>> ax.plot_surface(x.numpy(), y.numpy(), z.numpy())
>>> # xdoctest: +SKIP
<mpl_toolkits.mplot3d.art3d.Poly3DCollection object at 0x7f8f30d40100>
>>> plt.show()
.. image:: ../_static/img/meshgrid.png
:width: 512
"""
return _meshgrid(*tensors, indexing=indexing)
def _meshgrid(*tensors, indexing: Optional[str]):
if has_torch_function(tensors):
return handle_torch_function(meshgrid, tensors, *tensors, indexing=indexing)
if len(tensors) == 1 and isinstance(tensors[0], (list, tuple)):
# the old interface of passing the operands as one list argument
tensors = tensors[0] # type: ignore[assignment]
# Continue allowing call of old method that takes no indexing
# kwarg for forward compatibility reasons.
#
# Remove this two weeks after landing.
kwargs = {} if indexing is None else {'indexing': indexing}
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
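# Illustrative sketch of the 'ij' vs 'xy' indexing modes documented above
# (`_meshgrid_indexing_demo` is a hypothetical helper, not public API): for two
# 1D inputs, the 'xy' grids are the transposes of the 'ij' grids.
def _meshgrid_indexing_demo():
    x = torch.tensor([1, 2, 3])
    y = torch.tensor([4, 5])
    gx_ij, gy_ij = torch.meshgrid(x, y, indexing='ij')   # shapes (3, 2)
    gx_xy, gy_xy = torch.meshgrid(x, y, indexing='xy')   # shapes (2, 3)
    assert torch.equal(gx_xy, gx_ij.t()) and torch.equal(gy_xy, gy_ij.t())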
def stft(input: Tensor, n_fft: int, hop_length: Optional[int] = None,
win_length: Optional[int] = None, window: Optional[Tensor] = None,
center: bool = True, pad_mode: str = 'reflect', normalized: bool = False,
onesided: Optional[bool] = None,
return_complex: Optional[bool] = None) -> Tensor:
r"""Short-time Fourier transform (STFT).
.. warning::
From version 1.8.0, :attr:`return_complex` must always be given
explicitly for real inputs and `return_complex=False` has been
deprecated. Strongly prefer `return_complex=True` as in a future
pytorch release, this function will only return complex tensors.
Note that :func:`torch.view_as_real` can be used to recover a real
tensor with an extra last dimension for real and imaginary components.
The STFT computes the Fourier transform of short overlapping windows of the
    input. This gives the frequency components of the signal as they change over
    time. The interface of this function is modeled after (but *not* a drop-in
    replacement for) the librosa_ stft function.
.. _librosa: https://librosa.org/doc/latest/generated/librosa.stft.html
Ignoring the optional batch dimension, this method computes the following
expression:
.. math::
X[\omega, m] = \sum_{k = 0}^{\text{win\_length-1}}%
\text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\ %
\exp\left(- j \frac{2 \pi \cdot \omega k}{\text{win\_length}}\right),
where :math:`m` is the index of the sliding window, and :math:`\omega` is
the frequency :math:`0 \leq \omega < \text{n\_fft}` for ``onesided=False``,
or :math:`0 \leq \omega < \lfloor \text{n\_fft} / 2 \rfloor + 1` for ``onesided=True``.
* :attr:`input` must be either a 1-D time sequence or a 2-D batch of time
sequences.
* If :attr:`hop_length` is ``None`` (default), it is treated as equal to
``floor(n_fft / 4)``.
* If :attr:`win_length` is ``None`` (default), it is treated as equal to
:attr:`n_fft`.
* :attr:`window` can be a 1-D tensor of size :attr:`win_length`, e.g., from
:meth:`torch.hann_window`. If :attr:`window` is ``None`` (default), it is
treated as if having :math:`1` everywhere in the window. If
:math:`\text{win\_length} < \text{n\_fft}`, :attr:`window` will be padded on
both sides to length :attr:`n_fft` before being applied.
* If :attr:`center` is ``True`` (default), :attr:`input` will be padded on
both sides so that the :math:`t`-th frame is centered at time
:math:`t \times \text{hop\_length}`. Otherwise, the :math:`t`-th frame
begins at time :math:`t \times \text{hop\_length}`.
* :attr:`pad_mode` determines the padding method used on :attr:`input` when
:attr:`center` is ``True``. See :meth:`torch.nn.functional.pad` for
all available options. Default is ``"reflect"``.
* If :attr:`onesided` is ``True`` (default for real input), only values for
:math:`\omega` in :math:`\left[0, 1, 2, \dots, \left\lfloor
\frac{\text{n\_fft}}{2} \right\rfloor + 1\right]` are returned because
the real-to-complex Fourier transform satisfies the conjugate symmetry,
i.e., :math:`X[m, \omega] = X[m, \text{n\_fft} - \omega]^*`.
Note if the input or window tensors are complex, then :attr:`onesided`
output is not possible.
* If :attr:`normalized` is ``True`` (default is ``False``), the function
returns the normalized STFT results, i.e., multiplied by :math:`(\text{frame\_length})^{-0.5}`.
* If :attr:`return_complex` is ``True`` (default if input is complex), the
return is a ``input.dim() + 1`` dimensional complex tensor. If ``False``,
the output is a ``input.dim() + 2`` dimensional real tensor where the last
dimension represents the real and imaginary components.
Returns either a complex tensor of size :math:`(* \times N \times T)` if
:attr:`return_complex` is true, or a real tensor of size :math:`(* \times N
\times T \times 2)`. Where :math:`*` is the optional batch size of
:attr:`input`, :math:`N` is the number of frequencies where STFT is applied
and :math:`T` is the total number of frames used.
.. warning::
This function changed signature at version 0.4.1. Calling with the
previous signature may cause error or return incorrect result.
Args:
input (Tensor): the input tensor
n_fft (int): size of Fourier transform
hop_length (int, optional): the distance between neighboring sliding window
frames. Default: ``None`` (treated as equal to ``floor(n_fft / 4)``)
win_length (int, optional): the size of window frame and STFT filter.
Default: ``None`` (treated as equal to :attr:`n_fft`)
window (Tensor, optional): the optional window function.
Default: ``None`` (treated as window of all :math:`1` s)
center (bool, optional): whether to pad :attr:`input` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (str, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
normalized (bool, optional): controls whether to return the normalized STFT results
Default: ``False``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy for real inputs.
Default: ``True`` for real :attr:`input` and :attr:`window`, ``False`` otherwise.
return_complex (bool, optional): whether to return a complex tensor, or
a real tensor with an extra last dimension for the real and
imaginary components.
Returns:
Tensor: A tensor containing the STFT result with shape described above
"""
if has_torch_function_unary(input):
return handle_torch_function(
stft, (input,), input, n_fft, hop_length=hop_length, win_length=win_length,
window=window, center=center, pad_mode=pad_mode, normalized=normalized,
onesided=onesided, return_complex=return_complex)
# NOTE: Do not edit. This code will be removed once the forward-compatibility
# period is over for PR #73432
if center:
signal_dim = input.dim()
extended_shape = [1] * (3 - signal_dim) + list(input.size())
pad = int(n_fft // 2)
input = F.pad(input.view(extended_shape), [pad, pad], pad_mode)
input = input.view(input.shape[-signal_dim:])
return _VF.stft(input, n_fft, hop_length, win_length, window, # type: ignore[attr-defined]
normalized, onesided, return_complex)
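# Illustrative sketch of the output shape rules described above
# (`_stft_shape_demo` is a hypothetical helper, not public API): with the default
# `center=True` and a onesided transform, the result has floor(n_fft / 2) + 1
# frequency bins and 1 + len(signal) // hop_length frames.
def _stft_shape_demo():
    signal = torch.randn(16000)
    n_fft, hop = 400, 160
    spec = torch.stft(signal, n_fft=n_fft, hop_length=hop,
                      window=torch.hann_window(n_fft), return_complex=True)
    assert spec.shape == (n_fft // 2 + 1, 1 + signal.numel() // hop)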
istft = _add_docstr(
torch.istft,
"istft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, "
"normalized=False, onesided=None, length=None, return_complex=False) -> Tensor:\n"
r"""
Inverse short time Fourier Transform. This is expected to be the inverse of :func:`~torch.stft`.
It has the same parameters (+ additional optional parameter of :attr:`length`) and it should return the
least squares estimation of the original signal. The algorithm will check using the NOLA condition (
nonzero overlap).
An important consideration for the parameters :attr:`window` and :attr:`center` is that the envelope
created by the summation of all the windows must never be zero at any point in time. Specifically,
:math:`\sum_{t=-\infty}^{\infty} |w|^2[n-t\times hop\_length] \neq 0`.
Since :func:`~torch.stft` discards elements at the end of the signal if they do not fit in a frame,
``istft`` may return a shorter signal than the original signal (can occur if :attr:`center` is False
since the signal isn't padded). If `length` is given in the arguments and is longer than expected,
``istft`` will pad zeros to the end of the returned signal.
If :attr:`center` is ``True``, then there will be padding e.g. ``'constant'``, ``'reflect'``, etc.
Left padding can be trimmed off exactly because it can be calculated, but right padding cannot be
calculated without additional information.
Example: Suppose the last window is:
``[17, 18, 0, 0, 0]`` vs ``[18, 0, 0, 0, 0]``
The :attr:`n_fft`, :attr:`hop_length`, :attr:`win_length` are all the same which prevents the calculation
of right padding. These additional values could be zeros or a reflection of the signal so providing
:attr:`length` could be useful. If :attr:`length` is ``None`` then padding will be aggressively removed
(some loss of signal).
[1] D. W. Griffin and J. S. Lim, "Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236-243, Apr. 1984.
Args:
input (Tensor): The input tensor. Expected to be output of :func:`~torch.stft`,
can either be complex (``channel``, ``fft_size``, ``n_frame``), or real
(``channel``, ``fft_size``, ``n_frame``, 2) where the ``channel``
dimension is optional.
.. deprecated:: 1.8.0
Real input is deprecated, use complex inputs as returned by
``stft(..., return_complex=True)`` instead.
n_fft (int): Size of Fourier transform
hop_length (Optional[int]): The distance between neighboring sliding window frames.
(Default: ``n_fft // 4``)
win_length (Optional[int]): The size of window frame and STFT filter. (Default: ``n_fft``)
window (Optional[torch.Tensor]): The optional window function.
(Default: ``torch.ones(win_length)``)
center (bool): Whether :attr:`input` was padded on both sides so that the :math:`t`-th frame is
centered at time :math:`t \times \text{hop\_length}`.
(Default: ``True``)
normalized (bool): Whether the STFT was normalized. (Default: ``False``)
onesided (Optional[bool]): Whether the STFT was onesided.
(Default: ``True`` if ``n_fft != fft_size`` in the input size)
length (Optional[int]): The amount to trim the signal by (i.e. the
original signal length). (Default: whole signal)
return_complex (Optional[bool]):
Whether the output should be complex, or if the input should be
assumed to derive from a real signal and window.
Note that this is incompatible with ``onesided=True``.
(Default: ``False``)
Returns:
Tensor: Least squares estimation of the original signal of size (..., signal_length)
""")
if TYPE_CHECKING:
# These _impl functions return a variable number of tensors as output with
# __torch_function__; tuple unpacking is done already rather than being
# done by the caller of the _impl function
_unique_impl_out = Any
else:
_unique_impl_out = Tuple[Tensor, Tensor, Tensor]
def _unique_impl(input: Tensor, sorted: bool = True,
return_inverse: bool = False, return_counts: bool = False,
dim: Optional[int] = None) -> _unique_impl_out:
r"""unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None) -> Tuple[Tensor, Tensor, Tensor]
Returns the unique elements of the input tensor.
.. note:: This function is different from :func:`torch.unique_consecutive` in the sense that
this function also eliminates non-consecutive duplicate values.
.. note:: Currently in the CUDA implementation and the CPU implementation when dim is specified,
        `torch.unique` always sorts the tensor at the beginning regardless of the `sorted` argument.
Sorting could be slow, so if your input tensor is already sorted, it is recommended to use
:func:`torch.unique_consecutive` which avoids the sorting.
Args:
input (Tensor): the input tensor
sorted (bool): Whether to sort the unique elements in ascending order
before returning as output.
return_inverse (bool): Whether to also return the indices for where
elements in the original input ended up in the returned unique list.
return_counts (bool): Whether to also return the counts for each unique
element.
dim (int): the dimension to apply unique. If ``None``, the unique of the
flattened input is returned. default: ``None``
Returns:
(Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
- **output** (*Tensor*): the output list of unique scalar elements.
- **inverse_indices** (*Tensor*): (optional) if
:attr:`return_inverse` is True, there will be an additional
returned tensor (same shape as input) representing the indices
for where elements in the original input map to in the output;
otherwise, this function will only return a single tensor.
- **counts** (*Tensor*): (optional) if
:attr:`return_counts` is True, there will be an additional
returned tensor (same shape as output or output.size(dim),
if dim was specified) representing the number of occurrences
for each unique value or tensor.
Example::
>>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long))
>>> output
>>> # xdoctest: +SKIP
tensor([ 2, 3, 1])
>>> output, inverse_indices = torch.unique(
... torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True)
>>> output
tensor([ 1, 2, 3])
>>> inverse_indices
tensor([ 0, 2, 1, 2])
>>> output, inverse_indices = torch.unique(
... torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True)
>>> output
tensor([ 1, 2, 3])
>>> inverse_indices
tensor([[ 0, 2],
[ 1, 2]])
"""
if has_torch_function_unary(input):
return handle_torch_function(
unique, (input,), input, sorted=sorted, return_inverse=return_inverse,
return_counts=return_counts, dim=dim)
if dim is not None:
output, inverse_indices, counts = _VF.unique_dim(
input,
dim,
sorted=sorted,
return_inverse=return_inverse,
return_counts=return_counts,
)
else:
output, inverse_indices, counts = torch._unique2(
input,
sorted=sorted,
return_inverse=return_inverse,
return_counts=return_counts,
)
return output, inverse_indices, counts
def _unique_consecutive_impl(input: Tensor, return_inverse: bool = False,
return_counts: bool = False,
dim: Optional[int] = None) -> _unique_impl_out:
r"""Eliminates all but the first element from every consecutive group of equivalent elements.
.. note:: This function is different from :func:`torch.unique` in the sense that this function
        only eliminates consecutive duplicate values. Its semantics are similar to `std::unique`
in C++.
Args:
input (Tensor): the input tensor
return_inverse (bool): Whether to also return the indices for where
elements in the original input ended up in the returned unique list.
return_counts (bool): Whether to also return the counts for each unique
element.
dim (int): the dimension to apply unique. If ``None``, the unique of the
flattened input is returned. default: ``None``
Returns:
(Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
- **output** (*Tensor*): the output list of unique scalar elements.
- **inverse_indices** (*Tensor*): (optional) if
:attr:`return_inverse` is True, there will be an additional
returned tensor (same shape as input) representing the indices
for where elements in the original input map to in the output;
otherwise, this function will only return a single tensor.
- **counts** (*Tensor*): (optional) if
:attr:`return_counts` is True, there will be an additional
returned tensor (same shape as output or output.size(dim),
if dim was specified) representing the number of occurrences
for each unique value or tensor.
Example::
>>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2])
>>> output = torch.unique_consecutive(x)
>>> output
tensor([1, 2, 3, 1, 2])
>>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True)
>>> output
tensor([1, 2, 3, 1, 2])
>>> inverse_indices
tensor([0, 0, 1, 1, 2, 3, 3, 4])
>>> output, counts = torch.unique_consecutive(x, return_counts=True)
>>> output
tensor([1, 2, 3, 1, 2])
>>> counts
tensor([2, 2, 1, 2, 1])
"""
if has_torch_function_unary(input):
return handle_torch_function(
unique_consecutive, (input,), input, return_inverse=return_inverse,
return_counts=return_counts, dim=dim)
output, inverse_indices, counts = _VF.unique_consecutive( # type: ignore[attr-defined]
input, return_inverse=return_inverse, return_counts=return_counts, dim=dim)
return output, inverse_indices, counts
def _return_counts(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
# type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
if has_torch_function_unary(input):
return _unique_impl(input, sorted, return_inverse, return_counts, dim)
output, _, counts = _unique_impl(input, sorted, return_inverse, return_counts, dim)
return output, counts
def _return_output(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
# type: (Tensor, bool, bool, bool, Optional[int]) -> Tensor
if has_torch_function_unary(input):
return _unique_impl(input, sorted, return_inverse, return_counts, dim)
output, _, _ = _unique_impl(input, sorted, return_inverse, return_counts, dim)
return output
def _return_inverse(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
# type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
if has_torch_function_unary(input):
return _unique_impl(input, sorted, return_inverse, return_counts, dim)
output, inverse_indices, _ = _unique_impl(input, sorted, return_inverse, return_counts, dim)
return output, inverse_indices
_return_inverse_false = boolean_dispatch(
arg_name='return_counts',
arg_index=3,
default=False,
if_true=_return_counts,
if_false=_return_output,
module_name=__name__,
func_name='unique')
_return_inverse_true = boolean_dispatch(
arg_name='return_counts',
arg_index=3,
default=False,
if_true=_unique_impl,
if_false=_return_inverse,
module_name=__name__,
func_name='unique')
# The return type of unique depends on `return_inverse` and `return_counts`, so in order to
# resolve the output type in TorchScript we need to statically know the value of both parameters
unique = boolean_dispatch(
arg_name='return_inverse',
arg_index=2,
default=False,
if_true=_return_inverse_true,
if_false=_return_inverse_false,
module_name=__name__,
func_name='unique')
unique.__doc__ = _unique_impl.__doc__
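# Illustrative sketch of the boolean dispatch above (`_unique_dispatch_demo` is a
# hypothetical helper, not public API): the arity of torch.unique's return value
# follows directly from `return_inverse` and `return_counts`.
def _unique_dispatch_demo():
    t = torch.tensor([2, 1, 2, 3])
    vals = torch.unique(t)                                            # one tensor
    vals, inv = torch.unique(t, return_inverse=True)                  # two tensors
    vals, inv, cnt = torch.unique(t, return_inverse=True, return_counts=True)
    assert torch.equal(vals, torch.tensor([1, 2, 3]))
    assert torch.equal(cnt, torch.tensor([1, 2, 1]))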
def _consecutive_return_counts(input, return_inverse=False, return_counts=False, dim=None):
# type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
if has_torch_function_unary(input):
return _unique_consecutive_impl(input, return_inverse, return_counts, dim)
output, _, counts = _unique_consecutive_impl(input, return_inverse, return_counts, dim)
return output, counts
def _consecutive_return_output(input, return_inverse=False, return_counts=False, dim=None):
# type: (Tensor, bool, bool, Optional[int]) -> Tensor
if has_torch_function_unary(input):
return _unique_consecutive_impl(input, return_inverse, return_counts, dim)
output, _, _ = _unique_consecutive_impl(input, return_inverse, return_counts, dim)
return output
def _consecutive_return_inverse(input, return_inverse=False, return_counts=False, dim=None):
# type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor]
if has_torch_function_unary(input):
return _unique_consecutive_impl(input, return_inverse, return_counts, dim)
output, inverse_indices, _ = _unique_consecutive_impl(input, return_inverse, return_counts, dim)
return output, inverse_indices
_consecutive_return_inverse_false = boolean_dispatch(
arg_name='return_counts',
arg_index=1,
default=False,
if_true=_consecutive_return_counts,
if_false=_consecutive_return_output,
module_name=__name__,
func_name='unique_consecutive')
_consecutive_return_inverse_true = boolean_dispatch(
arg_name='return_counts',
arg_index=1,
default=False,
if_true=_unique_consecutive_impl,
if_false=_consecutive_return_inverse,
module_name=__name__,
func_name='unique_consecutive')
# The return type of unique_consecutive depends on `return_inverse` and `return_counts`, so in order to
# resolve the output type in TorchScript we need to statically know the value of both parameters
unique_consecutive = boolean_dispatch(
arg_name='return_inverse',
arg_index=2,
default=False,
if_true=_consecutive_return_inverse_true,
if_false=_consecutive_return_inverse_false,
module_name=__name__,
func_name='unique_consecutive')
unique_consecutive.__doc__ = _unique_consecutive_impl.__doc__
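# Illustrative sketch contrasting unique_consecutive with unique
# (`_unique_consecutive_demo` is a hypothetical helper, not public API).
def _unique_consecutive_demo():
    x = torch.tensor([1, 1, 2, 2, 1])
    assert torch.equal(torch.unique_consecutive(x), torch.tensor([1, 2, 1]))
    assert torch.equal(torch.unique(x), torch.tensor([1, 2]))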
if TYPE_CHECKING:
pass
# There's no good way to use this type annotation without breaking JIT
# overloads. So leave untyped for mypy for now.
else:
@overload
def tensordot(a, b, dims: int = 2, out: Optional[torch.Tensor] = None):
pass
@overload # noqa: F811
def tensordot(a, b, dims: Tuple[List[int], List[int]], out: Optional[torch.Tensor] = None): # noqa: F811
pass
@overload # noqa: F811
def tensordot(a, b, dims: List[List[int]], out: Optional[torch.Tensor] = None): # noqa: F811
pass
@overload # noqa: F811
def tensordot(a, b, dims: torch.Tensor, out: Optional[torch.Tensor] = None): # noqa: F811
pass
def tensordot(a, b, dims=2, out: Optional[torch.Tensor] = None): # noqa: F811
r"""Returns a contraction of a and b over multiple dimensions.
:attr:`tensordot` implements a generalized matrix product.
Args:
a (Tensor): Left tensor to contract
b (Tensor): Right tensor to contract
dims (int or Tuple[List[int], List[int]] or List[List[int]] containing two lists or Tensor): number of dimensions to
contract or explicit lists of dimensions for :attr:`a` and
:attr:`b` respectively
When called with a non-negative integer argument :attr:`dims` = :math:`d`, and
the number of dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`,
respectively, :func:`~torch.tensordot` computes
.. math::
r_{i_0,...,i_{m-d}, i_d,...,i_n}
= \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}.
When called with :attr:`dims` of the list form, the given dimensions will be contracted
in place of the last :math:`d` of :attr:`a` and the first :math:`d` of :math:`b`. The sizes
in these dimensions must match, but :func:`~torch.tensordot` will deal with broadcasted
dimensions.
Examples::
>>> a = torch.arange(60.).reshape(3, 4, 5)
>>> b = torch.arange(24.).reshape(4, 3, 2)
>>> torch.tensordot(a, b, dims=([1, 0], [0, 1]))
tensor([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> a = torch.randn(3, 4, 5, device='cuda')
>>> b = torch.randn(4, 5, 6, device='cuda')
>>> # xdoctest: +SKIP
>>> c = torch.tensordot(a, b, dims=2).cpu()
tensor([[ 8.3504, -2.5436, 6.2922, 2.7556, -1.0732, 3.2741],
[ 3.3161, 0.0704, 5.0187, -0.4079, -4.3126, 4.8744],
[ 0.8223, 3.9445, 3.2168, -0.2400, 3.4117, 1.7780]])
>>> a = torch.randn(3, 5, 4, 6)
>>> b = torch.randn(6, 4, 5, 3)
>>> torch.tensordot(a, b, dims=([2, 1, 3], [1, 2, 0]))
tensor([[ 7.7193, -2.4867, -10.3204],
[ 1.5513, -14.4737, -6.5113],
[ -0.2850, 4.2573, -3.5997]])
"""
if has_torch_function_variadic(a, b):
return handle_torch_function(tensordot, (a, b), a, b, dims=dims, out=out)
if not isinstance(dims, (tuple, list, torch.Tensor, int)):
raise RuntimeError("tensordot expects dims to be int or "
+ "Tuple[List[int], List[int]] or "
+ "List[List[int]] containing two lists, but got "
+ f"dims={dims}")
dims_a: List[int] = []
dims_b: List[int] = []
if isinstance(dims, (tuple, list)):
dims_a, dims_b = dims
if isinstance(dims, torch.Tensor):
num_elements = dims.numel()
if num_elements > 1:
assert dims.size()[0] == 2
dims_a = torch.jit.annotate(List[int], dims[0].tolist())
dims_b = torch.jit.annotate(List[int], dims[1].tolist())
else:
dims_val = int(dims.item())
if dims_val < 0:
raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}")
dims_a = list(range(-dims_val, 0))
dims_b = list(range(dims_val))
if isinstance(dims, int):
if dims < 0:
raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}")
dims_a = list(range(-dims, 0))
dims_b = list(range(dims))
if out is None:
return _VF.tensordot(a, b, dims_a, dims_b) # type: ignore[attr-defined]
else:
return _VF.tensordot(a, b, dims_a, dims_b, out=out) # type: ignore[attr-defined]
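# Illustrative sketch of the integer `dims` form described above
# (`_tensordot_demo` is a hypothetical helper, not public API): dims=2 contracts
# the last two dims of `a` with the first two dims of `b`, which is the same as
# flattening those dims and performing a matrix multiplication.
def _tensordot_demo():
    a = torch.randn(3, 4, 5)
    b = torch.randn(4, 5, 6)
    out = torch.tensordot(a, b, dims=2)
    ref = a.reshape(3, 20) @ b.reshape(20, 6)
    assert torch.allclose(out, ref, atol=1e-5)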
def cartesian_prod(*tensors):
"""Do cartesian product of the given sequence of tensors. The behavior is similar to
python's `itertools.product`.
Args:
*tensors: any number of 1 dimensional tensors.
Returns:
        Tensor: A tensor equivalent to converting all the input tensors into lists,
        doing `itertools.product` on these lists, and finally converting the resulting list
        into a tensor.
Example::
>>> a = [1, 2, 3]
>>> b = [4, 5]
>>> # xdoctest: +SKIP
>>> list(itertools.product(a, b))
[(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)]
>>> tensor_a = torch.tensor(a)
>>> tensor_b = torch.tensor(b)
>>> torch.cartesian_prod(tensor_a, tensor_b)
tensor([[1, 4],
[1, 5],
[2, 4],
[2, 5],
[3, 4],
[3, 5]])
"""
# This wrapper exists to support variadic args.
if has_torch_function(tensors):
return handle_torch_function(cartesian_prod, tensors, *tensors)
return _VF.cartesian_prod(tensors) # type: ignore[attr-defined]
def block_diag(*tensors):
"""Create a block diagonal matrix from provided tensors.
Args:
*tensors: One or more tensors with 0, 1, or 2 dimensions.
Returns:
Tensor: A 2 dimensional tensor with all the input tensors arranged in
order such that their upper left and lower right corners are
diagonally adjacent. All other elements are set to 0.
Example::
>>> import torch
>>> A = torch.tensor([[0, 1], [1, 0]])
>>> B = torch.tensor([[3, 4, 5], [6, 7, 8]])
>>> C = torch.tensor(7)
>>> D = torch.tensor([1, 2, 3])
>>> E = torch.tensor([[4], [5], [6]])
>>> torch.block_diag(A, B, C, D, E)
tensor([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 4, 5, 0, 0, 0, 0, 0],
[0, 0, 6, 7, 8, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 7, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 2, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 4],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 5],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 6]])
"""
# This wrapper exists to support variadic args.
if has_torch_function(tensors):
return handle_torch_function(block_diag, tensors, *tensors)
return torch._C._VariableFunctions.block_diag(tensors) # type: ignore[attr-defined]
def cdist(x1, x2, p=2., compute_mode='use_mm_for_euclid_dist_if_necessary'):
# type: (Tensor, Tensor, float, str) -> (Tensor)
r"""Computes batched the p-norm distance between each pair of the two collections of row vectors.
Args:
x1 (Tensor): input tensor of shape :math:`B \times P \times M`.
x2 (Tensor): input tensor of shape :math:`B \times R \times M`.
p: p value for the p-norm distance to calculate between each vector pair
:math:`\in [0, \infty]`.
compute_mode:
'use_mm_for_euclid_dist_if_necessary' - will use matrix multiplication approach to calculate
euclidean distance (p = 2) if P > 25 or R > 25
'use_mm_for_euclid_dist' - will always use matrix multiplication approach to calculate
euclidean distance (p = 2)
'donot_use_mm_for_euclid_dist' - will never use matrix multiplication approach to calculate
euclidean distance (p = 2)
Default: use_mm_for_euclid_dist_if_necessary.
If x1 has shape :math:`B \times P \times M` and x2 has shape :math:`B \times R \times M` then the
output will have shape :math:`B \times P \times R`.
    This function is equivalent to `scipy.spatial.distance.cdist(input, 'minkowski', p=p)`
if :math:`p \in (0, \infty)`. When :math:`p = 0` it is equivalent to
`scipy.spatial.distance.cdist(input, 'hamming') * M`. When :math:`p = \infty`, the closest
scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`.
Example:
>>> a = torch.tensor([[0.9041, 0.0196], [-0.3108, -2.4423], [-0.4821, 1.059]])
>>> a
tensor([[ 0.9041, 0.0196],
[-0.3108, -2.4423],
[-0.4821, 1.0590]])
>>> b = torch.tensor([[-2.1763, -0.4713], [-0.6986, 1.3702]])
>>> b
tensor([[-2.1763, -0.4713],
[-0.6986, 1.3702]])
>>> torch.cdist(a, b, p=2)
tensor([[3.1193, 2.0959],
[2.7138, 3.8322],
[2.2830, 0.3791]])
"""
if has_torch_function_variadic(x1, x2):
return handle_torch_function(
cdist, (x1, x2), x1, x2, p=p, compute_mode=compute_mode)
if compute_mode == 'use_mm_for_euclid_dist_if_necessary':
return _VF.cdist(x1, x2, p, None) # type: ignore[attr-defined]
elif compute_mode == 'use_mm_for_euclid_dist':
return _VF.cdist(x1, x2, p, 1) # type: ignore[attr-defined]
elif compute_mode == 'donot_use_mm_for_euclid_dist':
return _VF.cdist(x1, x2, p, 2) # type: ignore[attr-defined]
else:
raise ValueError(f"{compute_mode} is not a valid value for compute_mode")
def atleast_1d(*tensors):
r"""
Returns a 1-dimensional view of each input tensor with zero dimensions.
Input tensors with one or more dimensions are returned as-is.
Args:
input (Tensor or list of Tensors)
Returns:
output (Tensor or tuple of Tensors)
Example::
>>> x = torch.randn(2)
>>> x
>>> # xdoctest: +SKIP
tensor([1.4584, 0.7583])
>>> torch.atleast_1d(x)
tensor([1.4584, 0.7583])
>>> x = torch.tensor(1.)
>>> x
tensor(1.)
>>> torch.atleast_1d(x)
tensor([1.])
>>> x = torch.tensor(0.5)
>>> y = torch.tensor(1.)
>>> torch.atleast_1d((x,y))
(tensor([0.5000]), tensor([1.]))
"""
# This wrapper exists to support variadic args.
if has_torch_function(tensors):
return handle_torch_function(atleast_1d, tensors, *tensors)
if len(tensors) == 1:
tensors = tensors[0]
return _VF.atleast_1d(tensors) # type: ignore[attr-defined]
def atleast_2d(*tensors):
r"""
Returns a 2-dimensional view of each input tensor with zero dimensions.
Input tensors with two or more dimensions are returned as-is.
Args:
input (Tensor or list of Tensors)
Returns:
output (Tensor or tuple of Tensors)
Example::
>>> x = torch.tensor(1.)
>>> x
tensor(1.)
>>> torch.atleast_2d(x)
tensor([[1.]])
>>> x = torch.randn(2,2)
>>> x
>>> # xdoctest: +SKIP
tensor([[2.2086, 2.5165],
[0.1757, 0.5194]])
>>> torch.atleast_2d(x)
tensor([[2.2086, 2.5165],
[0.1757, 0.5194]])
>>> x = torch.tensor(0.5)
>>> y = torch.tensor(1.)
>>> torch.atleast_2d((x,y))
(tensor([[0.5000]]), tensor([[1.]]))
"""
# This wrapper exists to support variadic args.
if has_torch_function(tensors):
return handle_torch_function(atleast_2d, tensors, *tensors)
if len(tensors) == 1:
tensors = tensors[0]
return _VF.atleast_2d(tensors) # type: ignore[attr-defined]
def atleast_3d(*tensors):
r"""
Returns a 3-dimensional view of each input tensor with zero dimensions.
Input tensors with three or more dimensions are returned as-is.
Args:
input (Tensor or list of Tensors)
Returns:
output (Tensor or tuple of Tensors)
Example:
>>> x = torch.tensor(0.5)
>>> x
tensor(0.5000)
>>> torch.atleast_3d(x)
tensor([[[0.5000]]])
>>> y = torch.randn(2,2)
>>> y
>>> # xdoctest: +SKIP
tensor([[-0.8079, 0.7460],
[-1.1647, 1.4734]])
>>> torch.atleast_3d(y)
tensor([[[-0.8079],
[ 0.7460]],
<BLANKLINE>
[[-1.1647],
[ 1.4734]]])
>>> x = torch.randn(1,1,1)
>>> x
tensor([[[-1.5689]]])
>>> torch.atleast_3d(x)
tensor([[[-1.5689]]])
>>> x = torch.tensor(0.5)
>>> y = torch.tensor(1.)
>>> torch.atleast_3d((x,y))
(tensor([[[0.5000]]]), tensor([[[1.]]]))
"""
# This wrapper exists to support variadic args.
if has_torch_function(tensors):
return handle_torch_function(atleast_3d, tensors, *tensors)
if len(tensors) == 1:
tensors = tensors[0]
return _VF.atleast_3d(tensors) # type: ignore[attr-defined]
if TYPE_CHECKING:
pass
# There's no good way to use this type annotation; cannot rename norm() to
# _norm_impl() in a way that doesn't break JIT overloads. So leave untyped
# for mypy for now.
# def norm(input: Tensor,
# p: Optional[Union[str, Number]] = "fro",
# dim: Optional[Union[int, List[int]]] = None,
# keepdim: bool = False,
# out: Optional[Tensor] = None,
# dtype: _dtype = None) -> Tensor:
# return _norm_impl(input, p, dim, keepdim, out, dtype)
else:
# TODO: type dim as BroadcastingList when
# https://github.com/pytorch/pytorch/issues/33782 is fixed
@overload
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None):
# type: (Tensor, str, Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor
pass
@overload # noqa: F811
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811
# type: (Tensor, Optional[number], Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor
pass
@overload # noqa: F811
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811
# type: (Tensor, Optional[number], Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor
pass
@overload # noqa: F811
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811
# type: (Tensor, str, Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor
pass
def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811
r"""Returns the matrix norm or vector norm of a given tensor.
.. warning::
torch.norm is deprecated and may be removed in a future PyTorch release.
Its documentation and behavior may be incorrect, and it is no longer
actively maintained.
Use :func:`torch.linalg.norm`, instead, or :func:`torch.linalg.vector_norm`
when computing vector norms and :func:`torch.linalg.matrix_norm` when
computing matrix norms. Note, however, the signature for these functions
is slightly different than the signature for torch.norm.
Args:
input (Tensor): The input tensor. Its data type must be either a floating
point or complex type. For complex inputs, the norm is calculated using the
absolute value of each element. If the input is complex and neither
:attr:`dtype` nor :attr:`out` is specified, the result's data type will
be the corresponding floating point type (e.g. float if :attr:`input` is
complexfloat).
p (int, float, inf, -inf, 'fro', 'nuc', optional): the order of norm. Default: ``'fro'``
The following norms can be calculated:
====== ============== ==========================
ord matrix norm vector norm
====== ============== ==========================
'fro' Frobenius norm --
'nuc' nuclear norm --
Number -- sum(abs(x)**ord)**(1./ord)
====== ============== ==========================
The vector norm can be calculated across any number of dimensions.
The corresponding dimensions of :attr:`input` are flattened into
one dimension, and the norm is calculated on the flattened
dimension.
Frobenius norm produces the same result as ``p=2`` in all cases
except when :attr:`dim` is a list of three or more dims, in which
case Frobenius norm throws an error.
Nuclear norm can only be calculated across exactly two dimensions.
dim (int, tuple of ints, list of ints, optional):
Specifies which dimension or dimensions of :attr:`input` to
calculate the norm across. If :attr:`dim` is ``None``, the norm will
be calculated across all dimensions of :attr:`input`. If the norm
type indicated by :attr:`p` does not support the specified number of
dimensions, an error will occur.
keepdim (bool, optional): whether the output tensors have :attr:`dim`
retained or not. Ignored if :attr:`dim` = ``None`` and
:attr:`out` = ``None``. Default: ``False``
out (Tensor, optional): the output tensor. Ignored if
:attr:`dim` = ``None`` and :attr:`out` = ``None``.
dtype (:class:`torch.dtype`, optional): the desired data type of
returned tensor. If specified, the input tensor is casted to
:attr:`dtype` while performing the operation. Default: None.
.. note::
Even though ``p='fro'`` supports any number of dimensions, the true
mathematical definition of Frobenius norm only applies to tensors with
exactly two dimensions. :func:`torch.linalg.norm` with ``ord='fro'`` aligns
with the mathematical definition, since it can only be applied across
exactly two dimensions.
Example::
>>> import torch
>>> a = torch.arange(9, dtype= torch.float) - 4
>>> b = a.reshape((3, 3))
>>> torch.norm(a)
>>> # xdoctest: +SKIP
tensor(7.7460)
>>> torch.norm(b)
tensor(7.7460)
>>> torch.norm(a, float('inf'))
tensor(4.)
>>> torch.norm(b, float('inf'))
tensor(4.)
>>> c = torch.tensor([[ 1, 2, 3],[-1, 1, 4]] , dtype= torch.float)
>>> torch.norm(c, dim=0)
tensor([1.4142, 2.2361, 5.0000])
>>> torch.norm(c, dim=1)
tensor([3.7417, 4.2426])
>>> torch.norm(c, p=1, dim=1)
tensor([6., 6.])
>>> d = torch.arange(8, dtype= torch.float).reshape(2,2,2)
>>> torch.norm(d, dim=(1,2))
tensor([ 3.7417, 11.2250])
>>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :])
(tensor(3.7417), tensor(11.2250))
"""
if has_torch_function_unary(input):
return handle_torch_function(
norm, (input,), input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype)
ndim = input.dim()
# catch default case
if dim is None and out is None and dtype is None and p is not None:
if isinstance(p, str):
if p == "fro":
return _VF.frobenius_norm(input, dim=(), keepdim=keepdim)
if not isinstance(p, str):
_dim = [i for i in range(ndim)] # noqa: C416 TODO: rewrite as list(range(m))
return _VF.norm(input, p, dim=_dim, keepdim=keepdim) # type: ignore[attr-defined]
# TODO: when https://github.com/pytorch/pytorch/issues/33782 is fixed
    # remove the overloads where dim is an int and replace with BroadcastingList1
# and remove next four lines, replace _dim with dim
if dim is not None:
if isinstance(dim, int):
_dim = [dim]
else:
_dim = dim
else:
_dim = None # type: ignore[assignment]
if isinstance(p, str):
if p == "fro":
if dtype is not None:
raise ValueError("dtype argument is not supported in frobenius norm")
if _dim is None:
_dim = list(range(ndim))
if out is None:
return _VF.frobenius_norm(input, _dim, keepdim=keepdim)
else:
return _VF.frobenius_norm(input, _dim, keepdim=keepdim, out=out)
elif p == "nuc":
if dtype is not None:
raise ValueError("dtype argument is not supported in nuclear norm")
if _dim is None:
if out is None:
return _VF.nuclear_norm(input, keepdim=keepdim)
else:
return _VF.nuclear_norm(input, keepdim=keepdim, out=out)
else:
if out is None:
return _VF.nuclear_norm(input, _dim, keepdim=keepdim)
else:
return _VF.nuclear_norm(input, _dim, keepdim=keepdim, out=out)
raise RuntimeError(f"only valid string values are 'fro' and 'nuc', found {p}")
else:
if _dim is None:
_dim = list(range(ndim))
if out is None:
if dtype is None:
return _VF.norm(input, p, _dim, keepdim=keepdim) # type: ignore[attr-defined]
else:
return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype) # type: ignore[attr-defined]
else:
if dtype is None:
return _VF.norm(input, p, _dim, keepdim=keepdim, out=out) # type: ignore[attr-defined]
else:
return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype, out=out) # type: ignore[attr-defined]
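# Illustrative migration sketch for the deprecation note above (`_norm_migration_demo`
# is a hypothetical helper, not public API): torch.linalg covers the common cases.
def _norm_migration_demo():
    a = torch.arange(9, dtype=torch.float) - 4
    b = a.reshape(3, 3)
    assert torch.allclose(torch.norm(a), torch.linalg.vector_norm(a))
    assert torch.allclose(torch.norm(b, 'fro'), torch.linalg.matrix_norm(b, 'fro'))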
def chain_matmul(*matrices, out=None):
r"""Returns the matrix product of the :math:`N` 2-D tensors. This product is efficiently computed
    using the matrix chain order algorithm which selects the order that incurs the lowest cost in terms
of arithmetic operations (`[CLRS]`_). Note that since this is a function to compute the product, :math:`N`
needs to be greater than or equal to 2; if equal to 2 then a trivial matrix-matrix product is returned.
If :math:`N` is 1, then this is a no-op - the original matrix is returned as is.
.. warning::
:func:`torch.chain_matmul` is deprecated and will be removed in a future PyTorch release.
Use :func:`torch.linalg.multi_dot` instead, which accepts a list of two or more tensors
rather than multiple arguments.
Args:
matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined.
out (Tensor, optional): the output tensor. Ignored if :attr:`out` = ``None``.
Returns:
Tensor: if the :math:`i^{th}` tensor was of dimensions :math:`p_{i} \times p_{i + 1}`, then the product
would be of dimensions :math:`p_{1} \times p_{N + 1}`.
Example::
>>> a = torch.randn(3, 4)
>>> b = torch.randn(4, 5)
>>> c = torch.randn(5, 6)
>>> d = torch.randn(6, 7)
>>> torch.chain_matmul(a, b, c, d)
>>> # xdoctest: +SKIP
tensor([[ -2.3375, -3.9790, -4.1119, -6.6577, 9.5609, -11.5095, -3.2614],
[ 21.4038, 3.3378, -8.4982, -5.2457, -10.2561, -2.4684, 2.7163],
[ -0.9647, -5.8917, -2.3213, -5.2284, 12.8615, -12.2816, -2.5095]])
.. _`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition
"""
# This wrapper exists to support variadic args.
if has_torch_function(matrices):
return handle_torch_function(chain_matmul, matrices, *matrices)
if out is None:
return _VF.chain_matmul(matrices) # type: ignore[attr-defined]
else:
return _VF.chain_matmul(matrices, out=out) # type: ignore[attr-defined]
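# Illustrative migration sketch for the deprecation note above
# (`_chain_matmul_migration_demo` is a hypothetical helper, not public API).
def _chain_matmul_migration_demo():
    a, b, c = torch.randn(3, 4), torch.randn(4, 5), torch.randn(5, 6)
    assert torch.allclose(torch.chain_matmul(a, b, c),
                          torch.linalg.multi_dot([a, b, c]), atol=1e-5)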
def _lu_impl(A, pivot=True, get_infos=False, out=None):
# type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor]
r"""Computes the LU factorization of a matrix or batches of matrices
:attr:`A`. Returns a tuple containing the LU factorization and
pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to
``True``.
.. warning::
:func:`torch.lu` is deprecated in favor of :func:`torch.linalg.lu_factor`
and :func:`torch.linalg.lu_factor_ex`. :func:`torch.lu` will be removed in a
future PyTorch release.
``LU, pivots, info = torch.lu(A, compute_pivots)`` should be replaced with
.. code:: python
LU, pivots = torch.linalg.lu_factor(A, compute_pivots)
``LU, pivots, info = torch.lu(A, compute_pivots, get_infos=True)`` should be replaced with
.. code:: python
LU, pivots, info = torch.linalg.lu_factor_ex(A, compute_pivots)
.. note::
* The returned permutation matrix for every matrix in the batch is
represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``.
``pivots[i] == j`` represents that in the ``i``-th step of the algorithm,
the ``i``-th row was permuted with the ``j-1``-th row.
* LU factorization with :attr:`pivot` = ``False`` is not available
for CPU, and attempting to do so will throw an error. However,
LU factorization with :attr:`pivot` = ``False`` is available for
CUDA.
* This function does not check if the factorization was successful
or not if :attr:`get_infos` is ``True`` since the status of the
factorization is present in the third element of the return tuple.
* In the case of batches of square matrices with size less or equal
to 32 on a CUDA device, the LU factorization is repeated for
singular matrices due to the bug in the MAGMA library
(see magma issue 13).
* ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`.
.. warning::
The gradients of this function will only be finite when :attr:`A` is full rank.
        This is because the LU decomposition is only differentiable at full rank matrices.
Furthermore, if :attr:`A` is close to not being full rank,
the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`.
Args:
A (Tensor): the tensor to factor of size :math:`(*, m, n)`
pivot (bool, optional): controls whether pivoting is done. Default: ``True``
get_infos (bool, optional): if set to ``True``, returns an info IntTensor.
Default: ``False``
out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``,
then the elements in the tuple are Tensor, IntTensor,
and IntTensor. If :attr:`get_infos` is ``False``, then the
elements in the tuple are Tensor, IntTensor. Default: ``None``
Returns:
(Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing
- **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)`
- **pivots** (*IntTensor*): the pivots of size :math:`(*, \text{min}(m, n))`.
``pivots`` stores all the intermediate transpositions of rows.
The final permutation ``perm`` could be reconstructed by
applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``,
where ``perm`` is initially the identity permutation of :math:`m` elements
(essentially this is what :func:`torch.lu_unpack` is doing).
- **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of
size :math:`(*)` where non-zero values indicate whether factorization for the matrix or
each minibatch has succeeded or failed
Example::
>>> # xdoctest: +REQUIRES(--lapack)
>>> A = torch.randn(2, 3, 3)
>>> A_LU, pivots = torch.lu(A)
>>> A_LU
tensor([[[ 1.3506, 2.5558, -0.0816],
[ 0.1684, 1.1551, 0.1940],
[ 0.1193, 0.6189, -0.5497]],
[[ 0.4526, 1.2526, -0.3285],
[-0.7988, 0.7175, -0.9701],
[ 0.2634, -0.9255, -0.3459]]])
>>> pivots
tensor([[ 3, 3, 3],
[ 3, 3, 3]], dtype=torch.int32)
>>> A_LU, pivots, info = torch.lu(A, get_infos=True)
>>> if info.nonzero().size(0) == 0:
... print('LU factorization succeeded for all samples!')
LU factorization succeeded for all samples!
"""
# If get_infos is True, then we don't need to check for errors and vice versa
return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos))
if TYPE_CHECKING:
_ListOrSeq = Sequence[Tensor]
else:
_ListOrSeq = List[Tensor]
def _check_list_size(out_len: int, get_infos: bool, out: _ListOrSeq) -> None:
get_infos_int = 1 if get_infos else 0
if out_len - get_infos_int != 2:
raise TypeError(f"expected tuple of {2 + int(get_infos)} elements but got {out_len}")
if not isinstance(out, (tuple, list)):
raise TypeError(f"argument 'out' must be tuple of Tensors, not {type(out).__name__}")
def _lu_with_infos(A, pivot=True, get_infos=False, out=None):
# type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor, Tensor]]) -> Tuple[Tensor, Tensor, Tensor]
if has_torch_function_unary(A):
return handle_torch_function(
lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out)
result = _lu_impl(A, pivot, get_infos, out)
if out is not None:
_check_list_size(len(out), get_infos, out)
for i in range(len(out)):
out[i].resize_as_(result[i]).copy_(result[i])
return out
else:
return result # A_LU, pivots, infos
def _lu_no_infos(A, pivot=True, get_infos=False, out=None):
# type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tensor]
    # need to check for torch_function here so that we exit early if A has a __torch_function__ override
if has_torch_function_unary(A):
return handle_torch_function(
lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out)
result = _lu_impl(A, pivot, get_infos, out)
if out is not None:
_check_list_size(len(out), get_infos, out)
for i in range(len(out)):
out[i].resize_as_(result[i]).copy_(result[i])
return out
else:
return result[0], result[1] # A_LU, pivots
# The return type of lu depends on `get_infos`, so in order to resolve the output type
# of lu in TorchScript we need to statically know the value of `get_infos`
lu = boolean_dispatch(
arg_name='get_infos',
arg_index=2,
default=False,
if_true=_lu_with_infos,
if_false=_lu_no_infos,
module_name=__name__,
func_name='lu')
lu.__doc__ = _lu_impl.__doc__
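# Illustration only (this helper is not part of the original module): because of
# the boolean_dispatch above, the number of values returned by torch.lu depends
# on `get_infos`. The sketch below shows both call forms next to the
# torch.linalg.lu_factor replacement recommended in the deprecation warning.
def _lu_usage_example():
    A = torch.randn(2, 3, 3)
    # Default form: two outputs (LU factors and pivots)
    A_LU, pivots = torch.lu(A)
    # With get_infos=True: a third output reports per-matrix success
    A_LU2, pivots2, infos = torch.lu(A, get_infos=True)
    # Preferred replacement going forward
    LU, piv = torch.linalg.lu_factor(A)
    return A_LU, pivots, infos, LU, piv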
def align_tensors(*tensors):
raise RuntimeError('`align_tensors` not yet implemented.')
|
pytorch-master
|
torch/functional.py
|
"""Implement various linear algebra algorithms for low rank matrices.
"""
__all__ = ["svd_lowrank", "pca_lowrank"]
from typing import Optional, Tuple
import torch
from torch import Tensor
from . import _linalg_utils as _utils
from .overrides import handle_torch_function, has_torch_function
def get_approximate_basis(
A: Tensor, q: int, niter: Optional[int] = 2, M: Optional[Tensor] = None
) -> Tensor:
"""Return tensor :math:`Q` with :math:`q` orthonormal columns such
that :math:`Q Q^H A` approximates :math:`A`. If :math:`M` is
specified, then :math:`Q` is such that :math:`Q Q^H (A - M)`
approximates :math:`A - M`.
.. note:: The implementation is based on the Algorithm 4.4 from
Halko et al, 2009.
.. note:: For an adequate approximation of a k-rank matrix
:math:`A`, where k is not known in advance but could be
estimated, the number of :math:`Q` columns, q, can be
              chosen according to the following criteria: in general,
:math:`k <= q <= min(2*k, m, n)`. For large low-rank
matrices, take :math:`q = k + 5..10`. If k is
relatively small compared to :math:`min(m, n)`, choosing
:math:`q = k + 0..2` may be sufficient.
.. note:: To obtain repeatable results, reset the seed for the
pseudorandom number generator
    Args:
A (Tensor): the input tensor of size :math:`(*, m, n)`
q (int): the dimension of subspace spanned by :math:`Q`
columns.
niter (int, optional): the number of subspace iterations to
conduct; ``niter`` must be a
nonnegative integer. In most cases, the
default value 2 is more than enough.
M (Tensor, optional): the input tensor's mean of size
:math:`(*, 1, n)`.
    References:
- Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
structure with randomness: probabilistic algorithms for
constructing approximate matrix decompositions,
arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
`arXiv <http://arxiv.org/abs/0909.4061>`_).
"""
niter = 2 if niter is None else niter
m, n = A.shape[-2:]
dtype = _utils.get_floating_dtype(A)
matmul = _utils.matmul
R = torch.randn(n, q, dtype=dtype, device=A.device)
# The following code could be made faster using torch.geqrf + torch.ormqr
# but geqrf is not differentiable
A_H = _utils.transjugate(A)
if M is None:
Q = torch.linalg.qr(matmul(A, R)).Q
for i in range(niter):
Q = torch.linalg.qr(matmul(A_H, Q)).Q
Q = torch.linalg.qr(matmul(A, Q)).Q
else:
M_H = _utils.transjugate(M)
Q = torch.linalg.qr(matmul(A, R) - matmul(M, R)).Q
for i in range(niter):
Q = torch.linalg.qr(matmul(A_H, Q) - matmul(M_H, Q)).Q
Q = torch.linalg.qr(matmul(A, Q) - matmul(M, Q)).Q
return Q
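# Illustration only (this helper and its synthetic data are not part of the
# original module): for an exactly rank-k input, the basis returned by
# get_approximate_basis should reconstruct A almost perfectly once q >= k.
def _approximate_basis_example():
    torch.manual_seed(0)
    m, n, k = 100, 40, 5
    A = torch.randn(m, k) @ torch.randn(k, n)  # exactly rank-k matrix
    Q = get_approximate_basis(A, q=k + 2, niter=2)
    # Relative reconstruction error of the projection Q Q^H A
    rel_err = torch.linalg.norm(A - Q @ (Q.mH @ A)) / torch.linalg.norm(A)
    return Q, rel_err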
def svd_lowrank(
A: Tensor,
q: Optional[int] = 6,
niter: Optional[int] = 2,
M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Return the singular value decomposition ``(U, S, V)`` of a matrix,
batches of matrices, or a sparse matrix :math:`A` such that
:math:`A \approx U diag(S) V^T`. In case :math:`M` is given, then
SVD is computed for the matrix :math:`A - M`.
.. note:: The implementation is based on the Algorithm 5.1 from
Halko et al, 2009.
.. note:: To obtain repeatable results, reset the seed for the
pseudorandom number generator
.. note:: The input is assumed to be a low-rank matrix.
    .. note:: In general, prefer the full-rank SVD implementation
              :func:`torch.linalg.svd` for dense matrices, as it is
              typically about an order of magnitude faster. The low-rank
              SVD is useful for huge sparse matrices that
              :func:`torch.linalg.svd` cannot handle.
    Args:
A (Tensor): the input tensor of size :math:`(*, m, n)`
q (int, optional): a slightly overestimated rank of A.
niter (int, optional): the number of subspace iterations to
conduct; niter must be a nonnegative
integer, and defaults to 2
M (Tensor, optional): the input tensor's mean of size
:math:`(*, 1, n)`.
    References:
- Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
structure with randomness: probabilistic algorithms for
constructing approximate matrix decompositions,
arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
`arXiv <https://arxiv.org/abs/0909.4061>`_).
"""
if not torch.jit.is_scripting():
tensor_ops = (A, M)
if not set(map(type, tensor_ops)).issubset(
(torch.Tensor, type(None))
) and has_torch_function(tensor_ops):
return handle_torch_function(
svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M
)
return _svd_lowrank(A, q=q, niter=niter, M=M)
def _svd_lowrank(
A: Tensor,
q: Optional[int] = 6,
niter: Optional[int] = 2,
M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
q = 6 if q is None else q
m, n = A.shape[-2:]
matmul = _utils.matmul
if M is None:
M_t = None
else:
M_t = _utils.transpose(M)
A_t = _utils.transpose(A)
# Algorithm 5.1 in Halko et al 2009, slightly modified to reduce
# the number conjugate and transpose operations
if m < n or n > q:
# computing the SVD approximation of a transpose in
# order to keep B shape minimal (the m < n case) or the V
# shape small (the n > q case)
Q = get_approximate_basis(A_t, q, niter=niter, M=M_t)
Q_c = _utils.conjugate(Q)
if M is None:
B_t = matmul(A, Q_c)
else:
B_t = matmul(A, Q_c) - matmul(M, Q_c)
assert B_t.shape[-2] == m, (B_t.shape, m)
assert B_t.shape[-1] == q, (B_t.shape, q)
assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape
U, S, Vh = torch.linalg.svd(B_t, full_matrices=False)
V = Vh.mH
V = Q.matmul(V)
else:
Q = get_approximate_basis(A, q, niter=niter, M=M)
Q_c = _utils.conjugate(Q)
if M is None:
B = matmul(A_t, Q_c)
else:
B = matmul(A_t, Q_c) - matmul(M_t, Q_c)
B_t = _utils.transpose(B)
assert B_t.shape[-2] == q, (B_t.shape, q)
assert B_t.shape[-1] == n, (B_t.shape, n)
assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape
U, S, Vh = torch.linalg.svd(B_t, full_matrices=False)
V = Vh.mH
U = Q.matmul(U)
return U, S, V
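# Illustration only (helper added for exposition, using synthetic data): when q
# comfortably exceeds the true rank, svd_lowrank should recover essentially the
# same leading singular values as the exact torch.linalg.svdvals.
def _svd_lowrank_example():
    torch.manual_seed(0)
    A = torch.randn(200, 10) @ torch.randn(10, 50)  # rank-10 matrix
    U, S, V = svd_lowrank(A, q=16, niter=4)
    S_exact = torch.linalg.svdvals(A)
    # Compare the ten leading singular values of both decompositions
    return S[:10], S_exact[:10]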
def pca_lowrank(
A: Tensor, q: Optional[int] = None, center: bool = True, niter: int = 2
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Performs linear Principal Component Analysis (PCA) on a low-rank
matrix, batches of such matrices, or sparse matrix.
This function returns a namedtuple ``(U, S, V)`` which is the
nearly optimal approximation of a singular value decomposition of
a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`.
.. note:: The relation of ``(U, S, V)`` to PCA is as follows:
- :math:`A` is a data matrix with ``m`` samples and
``n`` features
- the :math:`V` columns represent the principal directions
- :math:`S ** 2 / (m - 1)` contains the eigenvalues of
:math:`A^T A / (m - 1)` which is the covariance of
``A`` when ``center=True`` is provided.
- ``matmul(A, V[:, :k])`` projects data to the first k
principal components
    .. note:: Unlike the standard SVD, the sizes of the returned
              matrices depend on the specified rank and q
              values as follows:
- :math:`U` is m x q matrix
- :math:`S` is q-vector
- :math:`V` is n x q matrix
.. note:: To obtain repeatable results, reset the seed for the
pseudorandom number generator
Args:
A (Tensor): the input tensor of size :math:`(*, m, n)`
q (int, optional): a slightly overestimated rank of
:math:`A`. By default, ``q = min(6, m,
n)``.
center (bool, optional): if True, center the input tensor,
otherwise, assume that the input is
centered.
niter (int, optional): the number of subspace iterations to
conduct; niter must be a nonnegative
integer, and defaults to 2.
    References:
- Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
structure with randomness: probabilistic algorithms for
constructing approximate matrix decompositions,
arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
`arXiv <http://arxiv.org/abs/0909.4061>`_).
"""
if not torch.jit.is_scripting():
if type(A) is not torch.Tensor and has_torch_function((A,)):
return handle_torch_function(
pca_lowrank, (A,), A, q=q, center=center, niter=niter
)
(m, n) = A.shape[-2:]
if q is None:
q = min(6, m, n)
elif not (q >= 0 and q <= min(m, n)):
raise ValueError(
"q(={}) must be non-negative integer"
" and not greater than min(m, n)={}".format(q, min(m, n))
)
if not (niter >= 0):
raise ValueError("niter(={}) must be non-negative integer".format(niter))
dtype = _utils.get_floating_dtype(A)
if not center:
return _svd_lowrank(A, q, niter=niter, M=None)
if _utils.is_sparse(A):
if len(A.shape) != 2:
raise ValueError("pca_lowrank input is expected to be 2-dimensional tensor")
c = torch.sparse.sum(A, dim=(-2,)) / m
# reshape c
column_indices = c.indices()[0]
indices = torch.zeros(
2,
len(column_indices),
dtype=column_indices.dtype,
device=column_indices.device,
)
indices[0] = column_indices
C_t = torch.sparse_coo_tensor(
indices, c.values(), (n, 1), dtype=dtype, device=A.device
)
ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))
return _svd_lowrank(A, q, niter=niter, M=M)
else:
C = A.mean(dim=(-2,), keepdim=True)
return _svd_lowrank(A - C, q, niter=niter, M=None)
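# Illustration only (helper added for exposition, using synthetic data): a
# minimal PCA sketch that projects the data onto the first k principal
# directions returned by pca_lowrank, following the projection note above.
def _pca_lowrank_example(k: int = 2):
    torch.manual_seed(0)
    A = torch.randn(500, 20)
    U, S, V = pca_lowrank(A, q=6, center=True)
    # Variance explained by each retained component (see the S**2/(m-1) note)
    explained_var = S[:k] ** 2 / (A.shape[0] - 1)
    # Projection onto the first k principal components
    projected = torch.matmul(A, V[:, :k])
    return explained_var, projected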
|
pytorch-master
|
torch/_lowrank.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
# flake8: noqa
"""
This file is directly from
https://github.com/ActiveState/appdirs/blob/3fe6a83776843a46f20c2e5587afcffe05e03b39/appdirs.py
The license of https://github.com/ActiveState/appdirs copied below:
# This is the MIT license
Copyright (c) 2010 ActiveState Software Inc.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""Utilities for determining application-specific dirs.
See <https://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version__ = "1.4.4"
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
import os
import sys
unicode = str
if sys.platform.startswith("java"):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith("Windows"): # "Windows XP", "Windows 7", etc.
system = "win32"
elif os_name.startswith("Mac"): # "Mac OS X", etc.
system = "darwin"
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = "linux2"
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == "darwin":
path = os.path.expanduser("~/Library/Application Support/")
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == "darwin":
path = os.path.expanduser("/Library/Application Support")
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv(
"XDG_DATA_DIRS", os.pathsep.join(["/usr/local/share", "/usr/share"])
)
pathlist = [
os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: ~/Library/Preferences/<AppName>
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system == "win32":
path = user_data_dir(appname, appauthor, None, roaming)
elif system == "darwin":
path = os.path.expanduser("~/Library/Preferences/")
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
elif system == "darwin":
path = os.path.expanduser("/Library/Preferences")
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg")
pathlist = [
os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == "darwin":
path = os.path.expanduser("~/Library/Caches")
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific state dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user state directories are:
Mac OS X: same as user_data_dir
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
to extend the XDG spec and support $XDG_STATE_HOME.
That means, by default "~/.local/state/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv("XDG_STATE_HOME", os.path.expanduser("~/.local/state"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(os.path.expanduser("~/Library/Logs"), appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
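# Illustration only (helper added for this write-up; the returned paths are
# platform dependent): the per-directory functions above can be called directly
# when the AppDirs convenience wrapper defined below is not needed.
def _appdirs_usage_example():
    data = user_data_dir("MyApp", "MyCompany", version="1.0")
    cache_plain = user_cache_dir("MyApp", "MyCompany", opinion=False)
    logs = user_log_dir("MyApp", "MyCompany")
    return data, cache_plain, logs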
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(
self, appname=None, appauthor=None, version=None, roaming=False, multipath=False
):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(
self.appname, self.appauthor, version=self.version, roaming=self.roaming
)
@property
def site_data_dir(self):
return site_data_dir(
self.appname, self.appauthor, version=self.version, multipath=self.multipath
)
@property
def user_config_dir(self):
return user_config_dir(
self.appname, self.appauthor, version=self.version, roaming=self.roaming
)
@property
def site_config_dir(self):
return site_config_dir(
self.appname, self.appauthor, version=self.version, multipath=self.multipath
)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor, version=self.version)
@property
def user_state_dir(self):
return user_state_dir(self.appname, self.appauthor, version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor, version=self.version)
# ---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import winreg as _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shell, shellcon
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros("c", buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(
None,
getattr(win32.ShlObj, csidl_name),
None,
win32.ShlObj.SHGFP_TYPE_CURRENT,
buf,
)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros("c", buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
# ---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = (
"user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir",
)
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
|
pytorch-master
|
torch/_appdirs.py
|
import io
import torch
from ._utils import _type, _cuda
from torch.types import Storage
from typing import Any, TypeVar, Type, Union, cast
import copy
import collections
from functools import lru_cache
try:
import numpy as np
HAS_NUMPY = True
except ModuleNotFoundError:
np = None # type: ignore[assignment]
T = TypeVar('T', bound='Union[_StorageBase, TypedStorage]')
class _StorageBase(object):
_cdata: Any
is_sparse: bool = False
is_sparse_csr: bool = False
device: torch.device
def __init__(self, *args, **kwargs): ... # noqa: E704
def __len__(self) -> int: ... # noqa: E704
def __getitem__(self, idx): ... # noqa: E704
def copy_(self, source: T, non_blocking: bool = None) -> T: ... # noqa: E704
def nbytes(self) -> int: ... # noqa: E704
def size(self) -> int:
return self.nbytes()
def type(self, dtype: str = None, non_blocking: bool = False) -> T: ... # noqa: E704
def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ... # noqa: E704
def element_size(self) -> int: ... # noqa: E704
def get_device(self) -> int: ... # noqa: E704
def data_ptr(self) -> int: ... # noqa: E704
# Defined in torch/csrc/generic/StorageSharing.cpp
def _share_filename_cpu_(self, *args, **kwargs): ... # noqa: E704
def _share_fd_cpu_(self, *args, **kwargs): ... # noqa: E704
@classmethod
def _new_using_filename_cpu(cls: Type[T], size: int) -> T: ... # noqa: E704
@classmethod
def _new_using_fd_cpu(cls: Type[T], size: int) -> T: ... # noqa: E704
@classmethod
def from_buffer(cls, *args, **kwargs) -> T: ... # noqa: E704
@classmethod
def _new_shared_filename_cpu(cls, manager, obj, size, *, device=None, dtype=None) -> T: ... # noqa: E704
@classmethod
def _release_ipc_counter_cuda(cls, *args, **kwargs) -> T: ... # noqa: E704
@classmethod
def _new_with_weak_ptr(cls, *args, **kwargs) -> T: ... # noqa: E704
def _shared_decref(self) -> T: ... # noqa: E704
def _write_file(self, *args, **kwargs): ... # noqa: E704
def resize_(self, size: int): ... # noqa: E704
def _weak_ref(self, *args, **kwargs) -> T: ... # noqa: E704
def is_pinned(self) -> bool: ... # noqa: E704
def _set_from_file(self, *args, **kwargs): ... # noqa: E704
def _set_cdata(self, *args, **kwargs): ... # noqa: E704
def _share_cuda_(self, *args, **kwargs): ... # noqa: E704
def is_shared(self) -> bool: ... # noqa: E704
@classmethod
def _new_shared_cuda(cls, *args, **kwargs) -> T: ... # noqa: E704
def _shared_incref(self, *args, **kwargs): ... # noqa: E704
@classmethod
def _free_weak_ref(cls, *args, **kwargs): ... # noqa: E704
@property
def is_cuda(self): ... # noqa: E704
@classmethod
def from_file(cls, filename, shared, nbytes) -> T: ... # noqa: E704
@classmethod
def _expired(cls, *args, **kwargs) -> T: ... # noqa: E704
def __str__(self):
info_str = (
f'[{torch.typename(self)}(device={self.device}) '
f'of size {len(self)}]')
if self.device.type == 'meta':
return '...\n' + info_str
else:
data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
return data_str + '\n' + info_str
def __repr__(self):
return str(self)
def __iter__(self):
return iter(map(lambda i: self[i], range(self.size())))
def __copy__(self):
return self.clone()
def __deepcopy__(self, memo):
memo = memo.setdefault('torch', {})
if self._cdata in memo:
return memo[self._cdata]
new_storage = self.clone()
memo[self._cdata] = new_storage
return new_storage
def __reduce__(self):
b = io.BytesIO()
torch.save(self, b, _use_new_zipfile_serialization=False)
return (_load_from_bytes, (b.getvalue(),))
def __sizeof__(self):
return super(_StorageBase, self).__sizeof__() + self.size()
def clone(self):
"""Returns a copy of this storage"""
return type(self)(self.nbytes(), device=self.device).copy_(self)
def tolist(self):
"""Returns a list containing the elements of this storage"""
return list(self)
def cpu(self):
"""Returns a CPU copy of this storage if it's not already on the CPU"""
if self.device.type != 'cpu':
return torch.UntypedStorage(self.size()).copy_(self, False)
else:
return self
def mps(self):
"""Returns a CPU copy of this storage if it's not already on the CPU"""
if self.device.type != 'mps':
return torch.UntypedStorage(self.size(), device="mps").copy_(self, False)
else:
return self
def _to(self, dtype):
if not isinstance(dtype, torch.dtype):
raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
storage = torch.tensor([], dtype=torch.uint8, device=self.device).set_(cast(Storage, self)).to(dtype).storage()
if storage.data_ptr() == self.data_ptr():
storage = storage.clone()
return storage
def double(self):
"""Casts this storage to double type"""
return self._to(torch.double)
def float(self):
"""Casts this storage to float type"""
return self._to(torch.float)
def half(self):
"""Casts this storage to half type"""
return self._to(torch.half)
def long(self):
"""Casts this storage to long type"""
return self._to(torch.long)
def int(self):
"""Casts this storage to int type"""
return self._to(torch.int)
def short(self):
"""Casts this storage to short type"""
return self._to(torch.short)
def char(self):
"""Casts this storage to char type"""
return self._to(torch.int8)
def byte(self):
"""Casts this storage to byte type"""
return self._to(torch.uint8)
def bool(self):
"""Casts this storage to bool type"""
return self._to(torch.bool)
def bfloat16(self):
"""Casts this storage to bfloat16 type"""
return self._to(torch.bfloat16)
def complex_double(self):
"""Casts this storage to complex double type"""
return self._to(torch.cdouble)
def complex_float(self):
"""Casts this storage to complex float type"""
return self._to(torch.cfloat)
def pin_memory(self):
"""Copies the storage to pinned memory, if it's not already pinned."""
if self.is_cuda:
raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")
import torch.cuda
allocator = torch.cuda.memory._host_allocator() # type: ignore[attr-defined]
return type(self)(self.size(), allocator=allocator).copy_(self)
def share_memory_(self):
"""Moves the storage to shared memory.
This is a no-op for storages already in shared memory and for CUDA
storages, which do not need to be moved for sharing across processes.
Storages in shared memory cannot be resized.
Returns: self
"""
from torch.multiprocessing import get_sharing_strategy
if self.is_cuda:
pass # CUDA doesn't use POSIX shared memory
elif get_sharing_strategy() == 'file_system':
self._share_filename_cpu_()
else:
self._share_fd_cpu_()
return self
@classmethod
def _new_shared(cls, size, *, device='cpu'):
"""Creates a new storage in shared memory with the same data type"""
from torch.multiprocessing import get_sharing_strategy
device = torch.device(device)
if device.type == 'cuda':
return cls(size, device=device)
elif get_sharing_strategy() == 'file_system':
return cls._new_using_filename_cpu(size)
else:
return cls._new_using_fd_cpu(size)
def untyped(self):
return self
class UntypedStorage(torch._C.StorageBase, _StorageBase):
def __getitem__(self, *args, **kwargs):
if self.device.type == 'meta':
raise NotImplementedError("Not available for 'meta' device type")
return super().__getitem__(*args, **kwargs)
@property
def is_cuda(self):
return self.device.type == 'cuda'
def _load_from_bytes(b):
return torch.load(io.BytesIO(b))
_StorageBase.type = _type # type: ignore[assignment]
_StorageBase.cuda = _cuda # type: ignore[assignment]
@lru_cache(maxsize=None)
def _dtype_to_storage_type_map():
# NOTE: We should no longer add dtypes to this map. This map
# is only used for BC/FC with older PyTorch versions. Going forward,
# new dtypes of TypedStorage should not translate to a legacy
# <type>Storage class. Instead, new dtypes of TypedStorage should
# be serialized as an UntypedStorage paired with a torch.dtype
return {
torch.double: 'DoubleStorage',
torch.float: 'FloatStorage',
torch.half: 'HalfStorage',
torch.long: 'LongStorage',
torch.int: 'IntStorage',
torch.int16: 'ShortStorage',
torch.int8: 'CharStorage',
torch.uint8: 'ByteStorage',
torch.bool: 'BoolStorage',
torch.bfloat16: 'BFloat16Storage',
torch.cdouble: 'ComplexDoubleStorage',
torch.cfloat: 'ComplexFloatStorage',
torch.qint8: 'QInt8Storage',
torch.qint32: 'QInt32Storage',
torch.quint8: 'QUInt8Storage',
torch.quint4x2: 'QUInt4x2Storage',
torch.quint2x4: 'QUInt2x4Storage',
}
@lru_cache(maxsize=None)
def _storage_type_to_dtype_map():
dtype_map = {
val: key for key, val in _dtype_to_storage_type_map().items()}
return dtype_map
def _get_storage_from_sequence(sequence, dtype, device):
if dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
interpret_dtypes = {
torch.quint8: torch.uint8,
torch.quint4x2: torch.uint8,
torch.quint2x4: torch.uint8,
torch.qint32: torch.int32,
torch.qint8: torch.int8
}
tmp_tensor = torch.tensor(
sequence,
dtype=interpret_dtypes[dtype],
device=device)
else:
tmp_tensor = torch.tensor(
sequence,
dtype=dtype,
device=device)
return tmp_tensor.storage().untyped()
def _isint(x):
if HAS_NUMPY:
return isinstance(x, (int, np.integer))
else:
return isinstance(x, int)
class TypedStorage:
is_sparse = False
dtype: torch.dtype
def fill_(self, value):
self[0:len(self)] = value
return self
def __new__(cls, *args, wrap_storage=None, dtype=None, device=None):
if cls == torch.storage._LegacyStorage:
raise RuntimeError("Only child classes of _LegacyStorage can be instantiated")
if cls == TypedStorage:
return super().__new__(cls)
else:
arg_error_msg = (
f'{cls}.__new__ received an invalid combination '
f'of arguments. Expected one of:\n'
' * no arguments\n'
' * (int size)\n'
' * (Sequence data)\n'
' * (*, UntypedStorage wrap_storage)')
if device is not None:
raise RuntimeError(
arg_error_msg +
"\nKeyword argument 'device' cannot be specified")
if dtype is not None:
raise RuntimeError(
arg_error_msg +
"\nKeyword argument 'dtype' cannot be specified")
if wrap_storage is None:
if len(args) > 1:
raise RuntimeError(
arg_error_msg +
"\nToo many positional arguments")
if len(args) == 1 and not _isint(args[0]) and not isinstance(args[0], collections.abc.Sequence):
raise TypeError(
arg_error_msg +
f"\nArgument type not recognized: {type(args[0])}")
return TypedStorage(
*args,
dtype=cls.dtype,
device='cuda' if cls.__module__ == 'torch.cuda' else 'cpu')
else:
if len(args) != 0:
raise RuntimeError(
arg_error_msg +
"\nNo positional arguments should be given when using "
"'wrap_storage'")
if not isinstance(wrap_storage, torch.UntypedStorage):
raise TypeError(
arg_error_msg +
f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}")
cls_device = 'cuda' if cls.__module__ == 'torch.cuda' else 'cpu'
if wrap_storage.device.type != cls_device:
raise RuntimeError(
arg_error_msg +
f"\nDevice of 'wrap_storage' must be {cls_device}"
f", but got {wrap_storage.device.type}")
return TypedStorage(
*args,
wrap_storage=wrap_storage,
dtype=cls.dtype)
def __init__(self, *args, device=None, dtype=None, wrap_storage=None):
arg_error_msg = (
'TypedStorage.__init__ received an invalid combination '
'of arguments. Expected one of:\n'
' * (*, torch.device device, torch.dtype dtype)\n'
' * (int size, *, torch.device device, torch.dtype dtype)\n'
' * (Sequence data, *, torch.device device, torch.dtype dtype)\n'
' * (*, UntypedStorage wrap_storage, torch.dtype dtype)')
if wrap_storage is not None:
if len(args) != 0:
raise RuntimeError(
arg_error_msg +
"\nNo positional arguments should be given when using "
"'wrap_storage'")
if dtype is None:
raise RuntimeError(
arg_error_msg +
"\nArgument 'dtype' must be specified")
if not isinstance(dtype, torch.dtype):
raise TypeError(
arg_error_msg +
f"\nArgument 'dtype' must be torch.dtype, not {type(dtype)}")
if device is not None:
raise RuntimeError(
arg_error_msg +
"\nArgument 'device' should not be specified when 'wrap_storage' is given")
self.dtype = dtype
if not isinstance(wrap_storage, torch.UntypedStorage):
raise TypeError(
arg_error_msg +
f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}")
self._storage = wrap_storage
else:
self.dtype = torch.get_default_dtype() if dtype is None else dtype
device = torch.device('cpu' if device is None else device)
if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
if device.type == 'cuda':
raise RuntimeError("Cannot create CUDA storage with quantized dtype")
if len(args) == 0:
self._storage = torch.UntypedStorage(device=device)
elif len(args) == 1:
if _isint(args[0]):
self._storage = torch.UntypedStorage(int(args[0]) * self.element_size(), device=device)
elif isinstance(args[0], collections.abc.Sequence):
self._storage = _get_storage_from_sequence(args[0], self.dtype, device)
else:
raise TypeError(
arg_error_msg +
f"\nArgument type not recognized: {type(args[0])}")
else:
raise RuntimeError(
arg_error_msg +
"\nToo many positional arguments")
@property
def is_cuda(self):
return self.device.type == 'cuda'
def untyped(self):
"""Returns the internal :class:`torch.UntypedStorage`"""
return self._storage
def _new_wrapped_storage(self, untyped_storage):
assert type(untyped_storage) == torch.UntypedStorage
if type(self) == TypedStorage:
return TypedStorage(wrap_storage=untyped_storage, dtype=self.dtype)
else:
return type(self)(wrap_storage=untyped_storage)
def __len__(self):
return self._storage.nbytes() // self.element_size()
def _maybe_wrap_index(self, idx, is_stop=False):
if idx is None:
if is_stop:
return self.size()
else:
return 0
else:
if type(idx) != int:
raise TypeError(
f"can't index a {type(self)} with {type(idx)}")
if is_stop:
if (idx > self.size()) or (idx < -self.size()):
raise IndexError(
f'index {idx} out of range for storage of size {self.size()}')
if idx > 0:
return idx
else:
return idx % self.size()
else:
if (idx >= self.size()) or (idx < -self.size()):
raise IndexError(
f'index {idx} out of range for storage of size {self.size()}')
return idx % self.size()
def __setitem__(self, idx, value):
if not isinstance(idx, (int, slice)):
raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")
if torch.is_storage(value):
raise RuntimeError(f'cannot set item with value type {type(value)}')
if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
interpret_dtypes = {
torch.quint8: torch.uint8,
torch.quint4x2: torch.uint8,
torch.quint2x4: torch.uint8,
torch.qint32: torch.int32,
torch.qint8: torch.int8
}
tmp_dtype = interpret_dtypes[self.dtype]
tmp_tensor = torch.tensor([], dtype=tmp_dtype, device=self.device).set_(TypedStorage(
wrap_storage=self._storage,
dtype=tmp_dtype))
else:
tmp_tensor = torch.tensor([], dtype=self.dtype, device=self.device).set_(self)
tmp_tensor[idx] = value
def __getitem__(self, idx):
if self.device.type == 'meta':
raise NotImplementedError("Not available for 'meta' device type")
# NOTE: Before TypedStorage existed, indexing with a slice used to be
# possible for <type>Storage objects. However, it would return
# a storage view, which would be a hassle to implement in TypedStorage,
# so it was disabled
if isinstance(idx, slice):
raise RuntimeError('slices are only supported in UntypedStorage.__getitem__')
elif not isinstance(idx, int):
raise RuntimeError(f"can't index a {type(self)} with {type(idx)}")
if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
interpret_dtypes = {
torch.quint8: torch.uint8,
torch.quint4x2: torch.uint8,
torch.quint2x4: torch.uint8,
torch.qint32: torch.int32,
torch.qint8: torch.int8
}
return TypedStorage(
wrap_storage=self._storage,
dtype=interpret_dtypes[self.dtype])[idx]
idx_wrapped = self._maybe_wrap_index(idx)
tmp_tensor = torch.tensor([], dtype=self.dtype, device=self.device).set_(self)
return tmp_tensor[idx_wrapped].item()
def copy_(self, source: T, non_blocking: bool = None):
self._storage.copy_(source.untyped(), non_blocking)
return self
def nbytes(self):
return self._storage.nbytes()
def type(self, dtype: str = None, non_blocking: bool = False) -> Union[T, str]:
if dtype is None:
legacy_class = self._get_legacy_storage_class()
if legacy_class is not None:
return legacy_class.__module__ + '.' + legacy_class.__name__
return '.'.join([self.__module__, type(self).__name__])
else:
return self._storage.type(dtype, non_blocking)
def cuda(self, device=None, non_blocking=False, **kwargs) -> T:
if self.dtype in [torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8]:
raise RuntimeError("Cannot create CUDA storage with quantized dtype")
cuda_storage: torch.UntypedStorage = self._storage.cuda(device, non_blocking, **kwargs)
return self._new_wrapped_storage(cuda_storage)
def element_size(self):
return torch._utils._element_size(self.dtype)
def get_device(self) -> int:
return self._storage.get_device()
def __str__(self):
info_str = (
f'[{torch.typename(self)}(dtype={self.dtype}, '
f'device={self.device}) of size {len(self)}]')
if self.device.type == 'meta':
return '...\n' + info_str
else:
data_str = ' ' + '\n '.join(str(self[i]) for i in range(self.size()))
return data_str + '\n' + info_str
def __repr__(self):
return str(self)
def __iter__(self):
return iter(map(lambda i: self[i], range(self.size())))
def __copy__(self):
return self._new_wrapped_storage(copy.copy(self._storage))
def __deepcopy__(self, memo):
return self._new_wrapped_storage(copy.deepcopy(self._storage, memo))
def __sizeof__(self):
return super(TypedStorage, self).__sizeof__() + self.nbytes()
def clone(self):
"""Returns a copy of this storage"""
return self._new_wrapped_storage(self._storage.clone())
def tolist(self):
"""Returns a list containing the elements of this storage"""
return list(self)
def cpu(self):
"""Returns a CPU copy of this storage if it's not already on the CPU"""
return self._new_wrapped_storage(self._storage.cpu())
def pin_memory(self):
"""Coppies the storage to pinned memory, if it's not already pinned."""
return self._new_wrapped_storage(self._storage.pin_memory())
def share_memory_(self):
"""Moves the storage to shared memory.
This is a no-op for storages already in shared memory and for CUDA
storages, which do not need to be moved for sharing across processes.
Storages in shared memory cannot be resized.
Returns: self
"""
self._storage.share_memory_()
return self
def _new_shared(self, size, *, device=None):
"""Creates a new storage in shared memory with the same data type"""
if device is None:
device = 'cpu'
device = torch.device(device)
untyped_storage = torch.UntypedStorage._new_shared(size * self.element_size(), device=device)
return TypedStorage(
wrap_storage=untyped_storage,
dtype=self.dtype)
@property
def _cdata(self):
return self._storage._cdata
@property
def device(self):
return self._storage.device
def size(self):
return len(self)
def pickle_storage_type(self):
try:
return _dtype_to_storage_type_map()[self.dtype]
except KeyError:
raise KeyError(f'dtype {self.dtype} is not recognized')
def __reduce__(self):
b = io.BytesIO()
torch.save(self, b, _use_new_zipfile_serialization=False)
return (_load_from_bytes, (b.getvalue(),))
def data_ptr(self):
return self._storage.data_ptr()
def resize_(self, size):
self._storage.resize_(size * self.element_size())
@classmethod
def _free_weak_ref(cls, *args, **kwargs):
return UntypedStorage._free_weak_ref(*args, **kwargs)
def _weak_ref(self, *args, **kwargs):
return self._storage._weak_ref(*args, **kwargs)
@classmethod
def from_buffer(cls, *args, dtype=None, device=None, **kwargs):
if cls == TypedStorage:
dtype = torch.get_default_dtype() if dtype is None else dtype
device = torch.device('cpu' if device is None else device)
if device.type != 'cpu':
raise RuntimeError(f'TypedStorage.from_buffer: Not available for device {device.type}')
untyped_storage: torch.UntypedStorage = torch.UntypedStorage.from_buffer(*args, dtype=dtype, **kwargs)
else:
if dtype is not None or len(args) == 5:
raise RuntimeError((
"from_buffer: 'dtype' can only be specified in "
"UntypedStorage.from_buffer and TypedStorage.from_buffer"))
if device is not None:
raise RuntimeError((
"from_buffer: 'device' can only be specified in "
"UntypedStorage.from_buffer and TypedStorage.from_buffer"))
dtype = cls.dtype
untyped_storage = torch.UntypedStorage.from_buffer(*args, dtype=dtype, **kwargs)
return TypedStorage(wrap_storage=untyped_storage, dtype=dtype)
def _to(self, dtype):
if not isinstance(dtype, torch.dtype):
raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}")
storage = torch.tensor([], dtype=self.dtype, device=self.device).set_(self).to(dtype).storage()
if storage.data_ptr() == self.data_ptr():
storage = storage.clone()
return storage
def double(self):
"""Casts this storage to double type"""
return self._to(torch.double)
def float(self):
"""Casts this storage to float type"""
return self._to(torch.float)
def half(self):
"""Casts this storage to half type"""
return self._to(torch.half)
def long(self):
"""Casts this storage to long type"""
return self._to(torch.long)
def int(self):
"""Casts this storage to int type"""
return self._to(torch.int)
def short(self):
"""Casts this storage to short type"""
return self._to(torch.short)
def char(self):
"""Casts this storage to char type"""
return self._to(torch.int8)
def byte(self):
"""Casts this storage to byte type"""
return self._to(torch.uint8)
def bool(self):
"""Casts this storage to bool type"""
return self._to(torch.bool)
def bfloat16(self):
"""Casts this storage to bfloat16 type"""
return self._to(torch.bfloat16)
def complex_double(self):
"""Casts this storage to complex double type"""
return self._to(torch.cdouble)
def complex_float(self):
"""Casts this storage to complex float type"""
return self._to(torch.cfloat)
@classmethod
def from_file(cls, filename, shared, size):
"""
from_file(filename, shared=False, size=0) -> Storage
If `shared` is `True`, then memory is shared between all processes.
All changes are written to the file. If `shared` is `False`, then the changes on
the storage do not affect the file.
`size` is the number of elements in the storage. If `shared` is `False`,
then the file must contain at least `size * sizeof(Type)` bytes
(`Type` is the type of storage). If `shared` is `True` the file will be
created if needed.
Args:
filename (str): file name to map
shared (bool): whether to share memory
size (int): number of elements in the storage
"""
if cls == TypedStorage:
raise RuntimeError('from_file can only be called on derived classes')
untyped_storage: UntypedStorage = UntypedStorage.from_file(
filename,
shared,
size * torch._utils._element_size(cls.dtype))
storage = cls(wrap_storage=untyped_storage)
return storage
@classmethod
def _expired(cls, *args, **kwargs):
return UntypedStorage._expired(*args, **kwargs)
def is_pinned(self):
return self._storage.is_pinned()
def _write_file(self, *args, **kwargs):
return self._storage._write_file(*args, **kwargs)
def _set_from_file(self, *args, **kwargs):
return self._storage._set_from_file(*args, **kwargs)
def _set_cdata(self, *args, **kwargs):
return self._storage._set_cdata(*args, **kwargs)
def _share_cuda_(self, *args, **kwargs):
return self._storage._share_cuda_(*args, **kwargs)
def is_shared(self):
return self._storage.is_shared()
@classmethod
def _new_shared_cuda(cls, *args, **kwargs):
return torch.UntypedStorage._new_shared_cuda(*args, **kwargs)
def _share_filename_cpu_(self, *args, **kwargs):
manager_handle, storage_handle, size = self._storage._share_filename_cpu_(*args, **kwargs)
return manager_handle, storage_handle, size // self.element_size()
def _shared_decref(self):
self._storage._shared_decref()
return self
@classmethod
def _release_ipc_counter(cls, *args, device=None, **kwargs):
return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)
def _shared_incref(self, *args, **kwargs):
return self._storage._shared_incref(*args, **kwargs)
def _share_fd_cpu_(self, *args, **kwargs):
fd, size = self._storage._share_fd_cpu_(*args, **kwargs)
return fd, size // self.element_size()
def _get_legacy_storage_class(self):
if self.dtype not in _dtype_to_storage_type_map():
return None
storage_name = _dtype_to_storage_type_map()[self.dtype]
if self.device.type not in ['cpu', 'cuda']:
return None
module = torch if self.device.type == 'cpu' else torch.cuda
try:
return getattr(module, storage_name)
except AttributeError:
return None
TypedStorage.type.__doc__ = _type.__doc__
TypedStorage.cuda.__doc__ = _cuda.__doc__
class _LegacyStorageMeta(type):
dtype: torch.dtype
def __instancecheck__(cls, instance):
if type(instance) == TypedStorage:
cls_device = 'cuda' if cls.__module__ == 'torch.cuda' else 'cpu'
return (cls_device == instance.device.type) and (cls.dtype == instance.dtype)
return False
class _LegacyStorage(TypedStorage, metaclass=_LegacyStorageMeta):
@classmethod
def _new_shared(cls, size):
"""Creates a new storage in shared memory with the same data type"""
untyped_storage = torch.UntypedStorage._new_shared(size * cls().element_size())
return cls(wrap_storage=untyped_storage)
@classmethod
def _release_ipc_counter(cls, *args, **kwargs):
return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs)
@classmethod
def _new_shared_filename(cls, manager, obj, size):
bytes_size = size * torch._utils._element_size(cls.dtype)
return cls(wrap_storage=torch.UntypedStorage._new_shared_filename_cpu(manager, obj, bytes_size))
def _get_dtype_from_pickle_storage_type(pickle_storage_type: str):
try:
return _storage_type_to_dtype_map()[pickle_storage_type]
except KeyError:
raise KeyError(
f'pickle storage type "{pickle_storage_type}" is not recognized')
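# A minimal usage sketch for the casting and file-mapping helpers defined above.
# Illustrative only: the helper name `_example_typed_storage_usage` and the file name
# 'storage_demo.bin' are placeholders, not part of this module's API.
def _example_typed_storage_usage(filename="storage_demo.bin"):
    import torch
    # Cast a float storage to half precision via the `_to`-based helpers above.
    s = torch.FloatStorage([1.0, 2.0, 3.0])
    s_half = s.half()
    assert s_half.dtype == torch.half
    # Map a file into a shared storage; with shared=True the file is created if
    # needed and writes to the storage are flushed back to the file.
    mapped = torch.IntStorage.from_file(filename, shared=True, size=8)
    mapped[0] = 42
    return s_half, mapped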
|
pytorch-master
|
torch/storage.py
|
"""Locally Optimal Block Preconditioned Conjugate Gradient methods.
"""
# Author: Pearu Peterson
# Created: February 2020
from typing import Dict, Optional, Tuple
import torch
from torch import Tensor
from . import _linalg_utils as _utils
from .overrides import handle_torch_function, has_torch_function
__all__ = ["lobpcg"]
def _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U):
# compute F, such that F_ij = (d_j - d_i)^{-1} for i != j, F_ii = 0
F = D.unsqueeze(-2) - D.unsqueeze(-1)
F.diagonal(dim1=-2, dim2=-1).fill_(float("inf"))
F.pow_(-1)
# A.grad = U (D.grad + (U^T U.grad * F)) U^T
Ut = U.mT.contiguous()
res = torch.matmul(
U, torch.matmul(torch.diag_embed(D_grad) + torch.matmul(Ut, U_grad) * F, Ut)
)
return res
def _polynomial_coefficients_given_roots(roots):
"""
Given the `roots` of a polynomial, find the polynomial's coefficients.
If roots = (r_1, ..., r_n), then the method returns
coefficients (a_0, a_1, ..., a_n (== 1)) so that
p(x) = (x - r_1) * ... * (x - r_n)
= x^n + a_{n-1} * x^{n-1} + ... + a_1 * x + a_0
Note: for better performance, this would require writing a low-level kernel
"""
poly_order = roots.shape[-1]
poly_coeffs_shape = list(roots.shape)
# we assume p(x) = x^n + a_{n-1} * x^{n-1} + ... + a_1 * x + a_0,
# so the returned poly_coeffs are (a_0, ..., a_{n-1}, a_n (== 1)),
# but we insert one extra leading coefficient to enable better vectorization below
poly_coeffs_shape[-1] += 2
poly_coeffs = roots.new_zeros(poly_coeffs_shape)
poly_coeffs[..., 0] = 1
poly_coeffs[..., -1] = 1
# apply Horner's rule
for i in range(1, poly_order + 1):
# note that it is computationally hard to compute backward for this method,
# because, given the coefficients, it would require finding the roots and/or
# calculating the sensitivity based on Vieta's theorem.
# So the code below tries to circumvent the explicit root finding by a series
# of operations on memory copies imitating Horner's method.
# The memory copies are required to construct nodes in the computational graph
# by exploiting the explicit (not in-place, separate node for each step)
# recursion of Horner's method.
# Needs more memory, O(... * k^2), but with only O(... * k^2) complexity.
poly_coeffs_new = poly_coeffs.clone() if roots.requires_grad else poly_coeffs
out = poly_coeffs_new.narrow(-1, poly_order - i, i + 1)
out -= roots.narrow(-1, i - 1, 1) * poly_coeffs.narrow(
-1, poly_order - i + 1, i + 1
)
poly_coeffs = poly_coeffs_new
return poly_coeffs.narrow(-1, 1, poly_order + 1)
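# A small illustrative check of the helper above (not part of the original module):
# for roots (1, 2) the polynomial is (x - 1)(x - 2) = x^2 - 3x + 2, so the returned
# coefficients (a_0, a_1, a_2) should be (2, -3, 1). The name `_example_poly_coefficients`
# is a placeholder.
def _example_poly_coefficients():
    roots = torch.tensor([1.0, 2.0])
    coeffs = _polynomial_coefficients_given_roots(roots)
    assert torch.allclose(coeffs, torch.tensor([2.0, -3.0, 1.0]))
    return coeffs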
def _polynomial_value(poly, x, zero_power, transition):
"""
A generic method for computing poly(x) using Horner's rule.
Args:
poly (Tensor): the (possibly batched) 1D Tensor representing
polynomial coefficients such that
poly[..., :] = (a_0, ..., a_n (== 1)), and
poly(x) = poly[..., 0] * zero_power + ... + poly[..., n] * x^n
x (Tensor): the (possibly batched) value at which to evaluate the polynomial `poly`.
zero_power (Tensor): the representation of `x^0`. It is application-specific.
transition (Callable): the function that accepts some intermediate result `int_val`,
the `x` and a specific polynomial coefficient
`poly[..., k]` for some iteration `k`.
It basically performs one iteration of Horner's rule
defined as `x * int_val + poly[..., k] * zero_power`.
Note that `zero_power` is not a parameter,
because the step `+ poly[..., k] * zero_power` depends on `x`,
whether it is a vector, a matrix, or something else, so this
functionality is delegated to the user.
"""
res = zero_power.clone()
for k in range(poly.size(-1) - 2, -1, -1):
res = transition(res, x, poly[..., k])
return res
def _matrix_polynomial_value(poly, x, zero_power=None):
"""
Evaluates `poly(x)` for the (batched) matrix input `x`.
Check out `_polynomial_value` function for more details.
"""
# matrix-aware Horner's rule iteration
def transition(curr_poly_val, x, poly_coeff):
res = x.matmul(curr_poly_val)
res.diagonal(dim1=-2, dim2=-1).add_(poly_coeff.unsqueeze(-1))
return res
if zero_power is None:
zero_power = torch.eye(
x.size(-1), x.size(-1), dtype=x.dtype, device=x.device
).view(*([1] * len(list(x.shape[:-2]))), x.size(-1), x.size(-1))
return _polynomial_value(poly, x, zero_power, transition)
def _vector_polynomial_value(poly, x, zero_power=None):
"""
Evaluates `poly(x)` for the (batched) vector input `x`.
Check out `_polynomial_value` function for more details.
"""
# vector-aware Horner's rule iteration
def transition(curr_poly_val, x, poly_coeff):
res = torch.addcmul(poly_coeff.unsqueeze(-1), x, curr_poly_val)
return res
if zero_power is None:
zero_power = x.new_ones(1).expand(x.shape)
return _polynomial_value(poly, x, zero_power, transition)
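# An illustrative evaluation using the Horner-rule helpers above, under the monic
# convention poly[..., -1] == 1 noted in `_polynomial_value`. The helper name
# `_example_polynomial_value` is a placeholder, not part of this module.
def _example_polynomial_value():
    poly = torch.tensor([2.0, -3.0, 1.0])  # p(x) = x^2 - 3x + 2
    x = torch.tensor([1.0, 2.0, 4.0])
    vals = _vector_polynomial_value(poly, x)  # elementwise p(x) -> [0, 0, 6]
    assert torch.allclose(vals, torch.tensor([0.0, 0.0, 6.0]))
    return vals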
def _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest):
# compute a projection operator onto an orthogonal subspace spanned by the
# columns of U defined as (I - UU^T)
Ut = U.mT.contiguous()
proj_U_ortho = -U.matmul(Ut)
proj_U_ortho.diagonal(dim1=-2, dim2=-1).add_(1)
# compute U_ortho, a basis for the orthogonal complement to the span(U),
# by projecting a random [..., m, m - k] matrix onto the subspace spanned
# by the columns of U.
#
# fix generator for determinism
gen = torch.Generator(A.device)
# orthogonal complement to the span(U)
U_ortho = proj_U_ortho.matmul(
torch.randn(
(*A.shape[:-1], A.size(-1) - D.size(-1)),
dtype=A.dtype,
device=A.device,
generator=gen,
)
)
U_ortho_t = U_ortho.mT.contiguous()
# compute the coefficients of the characteristic polynomial of the tensor D.
# Note that D is diagonal, so the diagonal elements are exactly the roots
# of the characteristic polynomial.
chr_poly_D = _polynomial_coefficients_given_roots(D)
# the code below finds the explicit solution to the Sylvester equation
# U_ortho^T A U_ortho dX - dX D = -U_ortho^T A U
# and incorporates it into the whole gradient stored in the `res` variable.
#
# Equivalent to the following naive implementation:
# res = A.new_zeros(A.shape)
# p_res = A.new_zeros(*A.shape[:-1], D.size(-1))
# for k in range(1, chr_poly_D.size(-1)):
# p_res.zero_()
# for i in range(0, k):
# p_res += (A.matrix_power(k - 1 - i) @ U_grad) * D.pow(i).unsqueeze(-2)
# res -= chr_poly_D[k] * (U_ortho @ poly_D_at_A.inverse() @ U_ortho_t @ p_res @ U.t())
#
# Note that dX is a differential, so the gradient contribution comes from the backward sensitivity
# Tr(f(U_grad, D_grad, A, U, D)^T dX) = Tr(g(U_grad, A, U, D)^T dA) for some functions f and g,
# and we need to compute g(U_grad, A, U, D)
#
# The naive implementation is based on the paper
# Hu, Qingxi, and Daizhan Cheng.
# "The polynomial solution to the Sylvester matrix equation."
# Applied mathematics letters 19.9 (2006): 859-864.
#
# We can modify the computation of `p_res` from above in a more efficient way
# p_res = U_grad * (chr_poly_D[1] * D.pow(0) + ... + chr_poly_D[k] * D.pow(k)).unsqueeze(-2)
# + A U_grad * (chr_poly_D[2] * D.pow(0) + ... + chr_poly_D[k] * D.pow(k - 1)).unsqueeze(-2)
# + ...
# + A.matrix_power(k - 1) U_grad * chr_poly_D[k]
# Note that this saves us from redundant matrix products with A (elimination of matrix_power)
U_grad_projected = U_grad
series_acc = U_grad_projected.new_zeros(U_grad_projected.shape)
for k in range(1, chr_poly_D.size(-1)):
poly_D = _vector_polynomial_value(chr_poly_D[..., k:], D)
series_acc += U_grad_projected * poly_D.unsqueeze(-2)
U_grad_projected = A.matmul(U_grad_projected)
# compute chr_poly_D(A) which essentially is:
#
# chr_poly_D_at_A = A.new_zeros(A.shape)
# for k in range(chr_poly_D.size(-1)):
# chr_poly_D_at_A += chr_poly_D[k] * A.matrix_power(k)
#
# Note, however, for better performance we use the Horner's rule
chr_poly_D_at_A = _matrix_polynomial_value(chr_poly_D, A)
# compute the action of `chr_poly_D_at_A` restricted to U_ortho_t
chr_poly_D_at_A_to_U_ortho = torch.matmul(
U_ortho_t, torch.matmul(chr_poly_D_at_A, U_ortho)
)
# we need to invert 'chr_poly_D_at_A_to_U_ortho`, for that we compute its
# Cholesky decomposition and then use `torch.cholesky_solve` for better stability.
# Cholesky decomposition requires the input to be positive-definite.
# Note that `chr_poly_D_at_A_to_U_ortho` is positive-definite if
# 1. `largest` == False, or
# 2. `largest` == True and `k` is even
# under the assumption that `A` has distinct eigenvalues.
#
# check if `chr_poly_D_at_A_to_U_ortho` is positive-definite or negative-definite
chr_poly_D_at_A_to_U_ortho_sign = -1 if (largest and (k % 2 == 1)) else +1
chr_poly_D_at_A_to_U_ortho_L = torch.linalg.cholesky(
chr_poly_D_at_A_to_U_ortho_sign * chr_poly_D_at_A_to_U_ortho
)
# compute the gradient part in span(U)
res = _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U)
# incorporate the Sylvester equation solution into the full gradient
# it resides in span(U_ortho)
res -= U_ortho.matmul(
chr_poly_D_at_A_to_U_ortho_sign
* torch.cholesky_solve(
U_ortho_t.matmul(series_acc), chr_poly_D_at_A_to_U_ortho_L
)
).matmul(Ut)
return res
def _symeig_backward(D_grad, U_grad, A, D, U, largest):
# if `U` is square, then the columns of `U` form a complete eigenspace
if U.size(-1) == U.size(-2):
return _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U)
else:
return _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest)
class LOBPCGAutogradFunction(torch.autograd.Function):
@staticmethod
def forward( # type: ignore[override]
ctx,
A: Tensor,
k: Optional[int] = None,
B: Optional[Tensor] = None,
X: Optional[Tensor] = None,
n: Optional[int] = None,
iK: Optional[Tensor] = None,
niter: Optional[int] = None,
tol: Optional[float] = None,
largest: Optional[bool] = None,
method: Optional[str] = None,
tracker: None = None,
ortho_iparams: Optional[Dict[str, int]] = None,
ortho_fparams: Optional[Dict[str, float]] = None,
ortho_bparams: Optional[Dict[str, bool]] = None,
) -> Tuple[Tensor, Tensor]:
# makes sure that input is contiguous for efficiency.
# Note: autograd does not support dense gradients for sparse input yet.
A = A.contiguous() if (not A.is_sparse) else A
if B is not None:
B = B.contiguous() if (not B.is_sparse) else B
D, U = _lobpcg(
A,
k,
B,
X,
n,
iK,
niter,
tol,
largest,
method,
tracker,
ortho_iparams,
ortho_fparams,
ortho_bparams,
)
ctx.save_for_backward(A, B, D, U)
ctx.largest = largest
return D, U
@staticmethod
def backward(ctx, D_grad, U_grad):
A_grad = B_grad = None
grads = [None] * 14
A, B, D, U = ctx.saved_tensors
largest = ctx.largest
# lobpcg.backward has some limitations. Checks for unsupported input
if A.is_sparse or (B is not None and B.is_sparse and ctx.needs_input_grad[2]):
raise ValueError(
"lobpcg.backward does not support sparse input yet."
"Note that lobpcg.forward does though."
)
if (
A.dtype in (torch.complex64, torch.complex128)
or B is not None
and B.dtype in (torch.complex64, torch.complex128)
):
raise ValueError(
"lobpcg.backward does not support complex input yet."
"Note that lobpcg.forward does though."
)
if B is not None:
raise ValueError(
"lobpcg.backward does not support backward with B != I yet."
)
if largest is None:
largest = True
# symeig backward
if B is None:
A_grad = _symeig_backward(D_grad, U_grad, A, D, U, largest)
# A has index 0
grads[0] = A_grad
# B has index 2
grads[2] = B_grad
return tuple(grads)
def lobpcg(
A: Tensor,
k: Optional[int] = None,
B: Optional[Tensor] = None,
X: Optional[Tensor] = None,
n: Optional[int] = None,
iK: Optional[Tensor] = None,
niter: Optional[int] = None,
tol: Optional[float] = None,
largest: Optional[bool] = None,
method: Optional[str] = None,
tracker: None = None,
ortho_iparams: Optional[Dict[str, int]] = None,
ortho_fparams: Optional[Dict[str, float]] = None,
ortho_bparams: Optional[Dict[str, bool]] = None,
) -> Tuple[Tensor, Tensor]:
"""Find the k largest (or smallest) eigenvalues and the corresponding
eigenvectors of a symmetric positive definite generalized
eigenvalue problem using matrix-free LOBPCG methods.
This function is a front-end to the following LOBPCG algorithms
selectable via `method` argument:
`method="basic"` - the LOBPCG method introduced by Andrew
Knyazev, see [Knyazev2001]. A less robust method; it may fail when
Cholesky is applied to singular input.
`method="ortho"` - the LOBPCG method with orthogonal basis
selection [StathopoulosEtal2002]. A robust method.
Supported inputs are dense, sparse, and batches of dense matrices.
.. note:: In general, the basic method spends the least time per
iteration. However, the robust method converges much faster and
is more stable, so the basic method is generally not
recommended; there exist cases, though, where its use
may be preferred.
.. warning:: The backward method does not support sparse or complex inputs.
It works only when `B` is not provided (i.e. `B == None`).
We are actively working on extensions, and the details of
the algorithms are going to be published promptly.
.. warning:: While it is assumed that `A` is symmetric, `A.grad` is not.
To make sure that `A.grad` is symmetric, so that `A - t * A.grad` is symmetric
in first-order optimization routines, prior to running `lobpcg`
we do the following symmetrization map: `A -> (A + A.t()) / 2`.
The map is performed only when the `A` requires gradients.
Args:
A (Tensor): the input tensor of size :math:`(*, m, m)`
B (Tensor, optional): the input tensor of size :math:`(*, m,
m)`. When not specified, `B` is interpreted as the
identity matrix.
X (tensor, optional): the input tensor of size :math:`(*, m, n)`
where `k <= n <= m`. When specified, it is used as
initial approximation of eigenvectors. X must be a
dense tensor.
iK (tensor, optional): the input tensor of size :math:`(*, m,
m)`. When specified, it will be used as preconditioner.
k (integer, optional): the number of requested
eigenpairs. Default is the number of :math:`X`
columns (when specified) or `1`.
n (integer, optional): if :math:`X` is not specified then `n`
specifies the size of the generated random
approximation of eigenvectors. Default value for `n`
is `k`. If :math:`X` is specified, the value of `n`
(when specified) must be the number of :math:`X`
columns.
tol (float, optional): residual tolerance for the stopping
criterion. Default is `feps ** 0.5` where `feps` is the
smallest non-zero floating-point number of the data type of the
given input tensor `A`.
largest (bool, optional): when True, solve the eigenproblem for
the largest eigenvalues. Otherwise, solve the
eigenproblem for smallest eigenvalues. Default is
`True`.
method (str, optional): select LOBPCG method. See the
description of the function above. Default is
"ortho".
niter (int, optional): maximum number of iterations. When
reached, the iteration process is hard-stopped and
the current approximation of eigenpairs is returned.
For an unlimited number of iterations (until the convergence
criteria are met), use `-1`.
tracker (callable, optional) : a function for tracing the
iteration process. When specified, it is called at
each iteration step with LOBPCG instance as an
argument. The LOBPCG instance holds the full state of
the iteration process in the following attributes:
`iparams`, `fparams`, `bparams` - dictionaries of
integer, float, and boolean valued input
parameters, respectively
`ivars`, `fvars`, `bvars`, `tvars` - dictionaries
of integer, float, boolean, and Tensor valued
iteration variables, respectively.
`A`, `B`, `iK` - input Tensor arguments.
`E`, `X`, `S`, `R` - iteration Tensor variables.
For instance:
`ivars["istep"]` - the current iteration step
`X` - the current approximation of eigenvectors
`E` - the current approximation of eigenvalues
`R` - the current residual
`ivars["converged_count"]` - the current number of converged eigenpairs
`tvars["rerr"]` - the current state of convergence criteria
Note that when `tracker` stores Tensor objects from
the LOBPCG instance, it must make copies of these.
If `tracker` sets `bvars["force_stop"] = True`, the
iteration process will be hard-stopped.
ortho_iparams, ortho_fparams, ortho_bparams (dict, optional):
various parameters to LOBPCG algorithm when using
`method="ortho"`.
Returns:
E (Tensor): tensor of eigenvalues of size :math:`(*, k)`
X (Tensor): tensor of eigenvectors of size :math:`(*, m, k)`
References:
[Knyazev2001] Andrew V. Knyazev. (2001) Toward the Optimal
Preconditioned Eigensolver: Locally Optimal Block Preconditioned
Conjugate Gradient Method. SIAM J. Sci. Comput., 23(2),
517-541. (25 pages)
https://epubs.siam.org/doi/abs/10.1137/S1064827500366124
[StathopoulosEtal2002] Andreas Stathopoulos and Kesheng
Wu. (2002) A Block Orthogonalization Procedure with Constant
Synchronization Requirements. SIAM J. Sci. Comput., 23(6),
2165-2182. (18 pages)
https://epubs.siam.org/doi/10.1137/S1064827500370883
[DuerschEtal2018] Jed A. Duersch, Meiyue Shao, Chao Yang, Ming
Gu. (2018) A Robust and Efficient Implementation of LOBPCG.
SIAM J. Sci. Comput., 40(5), C655-C676. (22 pages)
https://epubs.siam.org/doi/abs/10.1137/17M1129830
"""
if not torch.jit.is_scripting():
tensor_ops = (A, B, X, iK)
if not set(map(type, tensor_ops)).issubset(
(torch.Tensor, type(None))
) and has_torch_function(tensor_ops):
return handle_torch_function(
lobpcg,
tensor_ops,
A,
k=k,
B=B,
X=X,
n=n,
iK=iK,
niter=niter,
tol=tol,
largest=largest,
method=method,
tracker=tracker,
ortho_iparams=ortho_iparams,
ortho_fparams=ortho_fparams,
ortho_bparams=ortho_bparams,
)
if not torch._jit_internal.is_scripting():
if A.requires_grad or (B is not None and B.requires_grad):
# While it is expected that `A` is symmetric,
# the `A_grad` might not be. Therefore we perform the trick below,
# so that `A_grad` becomes symmetric.
# The symmetrization is important for first-order optimization methods,
# so that (A - alpha * A_grad) is still a symmetric matrix.
# Same holds for `B`.
A_sym = (A + A.mT) / 2
B_sym = (B + B.mT) / 2 if (B is not None) else None
return LOBPCGAutogradFunction.apply(
A_sym,
k,
B_sym,
X,
n,
iK,
niter,
tol,
largest,
method,
tracker,
ortho_iparams,
ortho_fparams,
ortho_bparams,
)
else:
if A.requires_grad or (B is not None and B.requires_grad):
raise RuntimeError(
"Script and require grads is not supported atm."
"If you just want to do the forward, use .detach()"
"on A and B before calling into lobpcg"
)
return _lobpcg(
A,
k,
B,
X,
n,
iK,
niter,
tol,
largest,
method,
tracker,
ortho_iparams,
ortho_fparams,
ortho_bparams,
)
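# A minimal usage sketch for the public `lobpcg` front-end documented above.
# Illustrative only: the matrix construction and the helper name `_example_lobpcg`
# are placeholders, not part of this module's API.
def _example_lobpcg():
    torch.manual_seed(0)
    m, k = 20, 3
    Q = torch.randn(m, m)
    A = Q @ Q.T + m * torch.eye(m)  # a symmetric positive definite matrix
    # Request the k largest eigenpairs; E has shape (k,), X has shape (m, k).
    E, X = torch.lobpcg(A, k=k, largest=True)
    # The relative residual of A X - X diag(E) should be small (on the order of tol).
    rel_res = torch.linalg.norm(A @ X - X * E) / torch.linalg.norm(A @ X)
    assert rel_res < 1e-2
    return E, X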
def _lobpcg(
A: Tensor,
k: Optional[int] = None,
B: Optional[Tensor] = None,
X: Optional[Tensor] = None,
n: Optional[int] = None,
iK: Optional[Tensor] = None,
niter: Optional[int] = None,
tol: Optional[float] = None,
largest: Optional[bool] = None,
method: Optional[str] = None,
tracker: None = None,
ortho_iparams: Optional[Dict[str, int]] = None,
ortho_fparams: Optional[Dict[str, float]] = None,
ortho_bparams: Optional[Dict[str, bool]] = None,
) -> Tuple[Tensor, Tensor]:
# A must be square:
assert A.shape[-2] == A.shape[-1], A.shape
if B is not None:
# A and B must have the same shapes:
assert A.shape == B.shape, (A.shape, B.shape)
dtype = _utils.get_floating_dtype(A)
device = A.device
if tol is None:
feps = {torch.float32: 1.2e-07, torch.float64: 2.23e-16}[dtype]
tol = feps**0.5
m = A.shape[-1]
k = (1 if X is None else X.shape[-1]) if k is None else k
n = (k if n is None else n) if X is None else X.shape[-1]
if m < 3 * n:
raise ValueError(
"LPBPCG algorithm is not applicable when the number of A rows (={})"
" is smaller than 3 x the number of requested eigenpairs (={})".format(m, n)
)
method = "ortho" if method is None else method
iparams = {
"m": m,
"n": n,
"k": k,
"niter": 1000 if niter is None else niter,
}
fparams = {
"tol": tol,
}
bparams = {"largest": True if largest is None else largest}
if method == "ortho":
if ortho_iparams is not None:
iparams.update(ortho_iparams)
if ortho_fparams is not None:
fparams.update(ortho_fparams)
if ortho_bparams is not None:
bparams.update(ortho_bparams)
iparams["ortho_i_max"] = iparams.get("ortho_i_max", 3)
iparams["ortho_j_max"] = iparams.get("ortho_j_max", 3)
fparams["ortho_tol"] = fparams.get("ortho_tol", tol)
fparams["ortho_tol_drop"] = fparams.get("ortho_tol_drop", tol)
fparams["ortho_tol_replace"] = fparams.get("ortho_tol_replace", tol)
bparams["ortho_use_drop"] = bparams.get("ortho_use_drop", False)
if not torch.jit.is_scripting():
LOBPCG.call_tracker = LOBPCG_call_tracker # type: ignore[assignment]
if len(A.shape) > 2:
N = int(torch.prod(torch.tensor(A.shape[:-2])))
bA = A.reshape((N,) + A.shape[-2:])
bB = B.reshape((N,) + A.shape[-2:]) if B is not None else None
bX = X.reshape((N,) + X.shape[-2:]) if X is not None else None
bE = torch.empty((N, k), dtype=dtype, device=device)
bXret = torch.empty((N, m, k), dtype=dtype, device=device)
for i in range(N):
A_ = bA[i]
B_ = bB[i] if bB is not None else None
X_ = (
torch.randn((m, n), dtype=dtype, device=device) if bX is None else bX[i]
)
assert len(X_.shape) == 2 and X_.shape == (m, n), (X_.shape, (m, n))
iparams["batch_index"] = i
worker = LOBPCG(A_, B_, X_, iK, iparams, fparams, bparams, method, tracker)
worker.run()
bE[i] = worker.E[:k]
bXret[i] = worker.X[:, :k]
if not torch.jit.is_scripting():
LOBPCG.call_tracker = LOBPCG_call_tracker_orig # type: ignore[assignment]
return bE.reshape(A.shape[:-2] + (k,)), bXret.reshape(A.shape[:-2] + (m, k))
X = torch.randn((m, n), dtype=dtype, device=device) if X is None else X
assert len(X.shape) == 2 and X.shape == (m, n), (X.shape, (m, n))
worker = LOBPCG(A, B, X, iK, iparams, fparams, bparams, method, tracker)
worker.run()
if not torch.jit.is_scripting():
LOBPCG.call_tracker = LOBPCG_call_tracker_orig # type: ignore[assignment]
return worker.E[:k], worker.X[:, :k]
class LOBPCG(object):
"""Worker class of LOBPCG methods."""
def __init__(
self,
A: Optional[Tensor],
B: Optional[Tensor],
X: Tensor,
iK: Optional[Tensor],
iparams: Dict[str, int],
fparams: Dict[str, float],
bparams: Dict[str, bool],
method: str,
tracker: None,
) -> None:
# constant parameters
self.A = A
self.B = B
self.iK = iK
self.iparams = iparams
self.fparams = fparams
self.bparams = bparams
self.method = method
self.tracker = tracker
m = iparams["m"]
n = iparams["n"]
# variable parameters
self.X = X
self.E = torch.zeros((n,), dtype=X.dtype, device=X.device)
self.R = torch.zeros((m, n), dtype=X.dtype, device=X.device)
self.S = torch.zeros((m, 3 * n), dtype=X.dtype, device=X.device)
self.tvars: Dict[str, Tensor] = {}
self.ivars: Dict[str, int] = {"istep": 0}
self.fvars: Dict[str, float] = {"_": 0.0}
self.bvars: Dict[str, bool] = {"_": False}
def __str__(self):
lines = ["LOPBCG:"]
lines += [" iparams={}".format(self.iparams)]
lines += [" fparams={}".format(self.fparams)]
lines += [" bparams={}".format(self.bparams)]
lines += [" ivars={}".format(self.ivars)]
lines += [" fvars={}".format(self.fvars)]
lines += [" bvars={}".format(self.bvars)]
lines += [" tvars={}".format(self.tvars)]
lines += [" A={}".format(self.A)]
lines += [" B={}".format(self.B)]
lines += [" iK={}".format(self.iK)]
lines += [" X={}".format(self.X)]
lines += [" E={}".format(self.E)]
r = ""
for line in lines:
r += line + "\n"
return r
def update(self):
"""Set and update iteration variables."""
if self.ivars["istep"] == 0:
X_norm = float(torch.norm(self.X))
iX_norm = X_norm**-1
A_norm = float(torch.norm(_utils.matmul(self.A, self.X))) * iX_norm
B_norm = float(torch.norm(_utils.matmul(self.B, self.X))) * iX_norm
self.fvars["X_norm"] = X_norm
self.fvars["A_norm"] = A_norm
self.fvars["B_norm"] = B_norm
self.ivars["iterations_left"] = self.iparams["niter"]
self.ivars["converged_count"] = 0
self.ivars["converged_end"] = 0
if self.method == "ortho":
self._update_ortho()
else:
self._update_basic()
self.ivars["iterations_left"] = self.ivars["iterations_left"] - 1
self.ivars["istep"] = self.ivars["istep"] + 1
def update_residual(self):
"""Update residual R from A, B, X, E."""
mm = _utils.matmul
self.R = mm(self.A, self.X) - mm(self.B, self.X) * self.E
def update_converged_count(self):
"""Determine the number of converged eigenpairs using backward stable
convergence criterion, see discussion in Sec 4.3 of [DuerschEtal2018].
Users may redefine this method for custom convergence criteria.
"""
# (...) -> int
prev_count = self.ivars["converged_count"]
tol = self.fparams["tol"]
A_norm = self.fvars["A_norm"]
B_norm = self.fvars["B_norm"]
E, X, R = self.E, self.X, self.R
rerr = (
torch.norm(R, 2, (0,))
* (torch.norm(X, 2, (0,)) * (A_norm + E[: X.shape[-1]] * B_norm)) ** -1
)
converged = rerr < tol
count = 0
for b in converged:
if not b:
# ignore convergence of following pairs to ensure
# strict ordering of eigenpairs
break
count += 1
assert count >= prev_count, (
"the number of converged eigenpairs "
"(was {}, got {}) cannot decrease".format(prev_count, count)
)
self.ivars["converged_count"] = count
self.tvars["rerr"] = rerr
return count
def stop_iteration(self):
"""Return True to stop iterations.
Note that tracker (if defined) can force-stop iterations by
setting ``worker.bvars['force_stop'] = True``.
"""
return (
self.bvars.get("force_stop", False)
or self.ivars["iterations_left"] == 0
or self.ivars["converged_count"] >= self.iparams["k"]
)
def run(self):
"""Run LOBPCG iterations.
Use this method as a template for implementing the LOBPCG
iteration scheme with a custom tracker that is compatible with
TorchScript.
"""
self.update()
if not torch.jit.is_scripting() and self.tracker is not None:
self.call_tracker()
while not self.stop_iteration():
self.update()
if not torch.jit.is_scripting() and self.tracker is not None:
self.call_tracker()
@torch.jit.unused
def call_tracker(self):
"""Interface for tracking iteration process in Python mode.
Tracking the iteration process is disabled in TorchScript
mode. In fact, one should specify tracker=None when JIT
compiling functions using lobpcg.
"""
# do nothing when in TorchScript mode
pass
# Internal methods
def _update_basic(self):
"""
Update or initialize iteration variables when `method == "basic"`.
"""
mm = torch.matmul
ns = self.ivars["converged_end"]
nc = self.ivars["converged_count"]
n = self.iparams["n"]
largest = self.bparams["largest"]
if self.ivars["istep"] == 0:
Ri = self._get_rayleigh_ritz_transform(self.X)
M = _utils.qform(_utils.qform(self.A, self.X), Ri)
E, Z = _utils.symeig(M, largest)
self.X[:] = mm(self.X, mm(Ri, Z))
self.E[:] = E
np = 0
self.update_residual()
nc = self.update_converged_count()
self.S[..., :n] = self.X
W = _utils.matmul(self.iK, self.R)
self.ivars["converged_end"] = ns = n + np + W.shape[-1]
self.S[:, n + np : ns] = W
else:
S_ = self.S[:, nc:ns]
Ri = self._get_rayleigh_ritz_transform(S_)
M = _utils.qform(_utils.qform(self.A, S_), Ri)
E_, Z = _utils.symeig(M, largest)
self.X[:, nc:] = mm(S_, mm(Ri, Z[:, : n - nc]))
self.E[nc:] = E_[: n - nc]
P = mm(S_, mm(Ri, Z[:, n : 2 * n - nc]))
np = P.shape[-1]
self.update_residual()
nc = self.update_converged_count()
self.S[..., :n] = self.X
self.S[:, n : n + np] = P
W = _utils.matmul(self.iK, self.R[:, nc:])
self.ivars["converged_end"] = ns = n + np + W.shape[-1]
self.S[:, n + np : ns] = W
def _update_ortho(self):
"""
Update or initialize iteration variables when `method == "ortho"`.
"""
mm = torch.matmul
ns = self.ivars["converged_end"]
nc = self.ivars["converged_count"]
n = self.iparams["n"]
largest = self.bparams["largest"]
if self.ivars["istep"] == 0:
Ri = self._get_rayleigh_ritz_transform(self.X)
M = _utils.qform(_utils.qform(self.A, self.X), Ri)
E, Z = _utils.symeig(M, largest)
self.X = mm(self.X, mm(Ri, Z))
self.update_residual()
np = 0
nc = self.update_converged_count()
self.S[:, :n] = self.X
W = self._get_ortho(self.R, self.X)
ns = self.ivars["converged_end"] = n + np + W.shape[-1]
self.S[:, n + np : ns] = W
else:
S_ = self.S[:, nc:ns]
# Rayleigh-Ritz procedure
E_, Z = _utils.symeig(_utils.qform(self.A, S_), largest)
# Update E, X, P
self.X[:, nc:] = mm(S_, Z[:, : n - nc])
self.E[nc:] = E_[: n - nc]
P = mm(
S_,
mm(
Z[:, n - nc :],
_utils.basis(_utils.transpose(Z[: n - nc, n - nc :])),
),
)
np = P.shape[-1]
# check convergence
self.update_residual()
nc = self.update_converged_count()
# update S
self.S[:, :n] = self.X
self.S[:, n : n + np] = P
W = self._get_ortho(self.R[:, nc:], self.S[:, : n + np])
ns = self.ivars["converged_end"] = n + np + W.shape[-1]
self.S[:, n + np : ns] = W
def _get_rayleigh_ritz_transform(self, S):
"""Return a transformation matrix that is used in Rayleigh-Ritz
procedure for reducing a general eigenvalue problem :math:`(S^TAS)
C = (S^TBS) C E` to a standard eigenvalue problem :math:`(Ri^T
S^TAS Ri) Z = Z E` where `C = Ri Z`.
.. note:: In the original Rayleigh-Ritz procedure in
[DuerschEtal2018], the problem is formulated as follows::
SAS = S^T A S
SBS = S^T B S
D = (<diagonal matrix of SBS>) ** -1/2
R^T R = Cholesky(D SBS D)
Ri = D R^-1
solve symeig problem Ri^T SAS Ri Z = Theta Z
C = Ri Z
To reduce the number of matrix products (denoted by empty
space between matrices), here we introduce element-wise
products (denoted by symbol `*`) so that the Rayleigh-Ritz
procedure becomes::
SAS = S^T A S
SBS = S^T B S
d = (<diagonal of SBS>) ** -1/2 # this is 1-d column vector
dd = d d^T # this is 2-d matrix
R^T R = Cholesky(dd * SBS)
Ri = R^-1 * d # broadcasting
solve symeig problem Ri^T SAS Ri Z = Theta Z
C = Ri Z
where `dd` is 2-d matrix that replaces matrix products `D M
D` with one element-wise product `M * dd`; and `d` replaces
matrix product `D M` with element-wise product `M *
d`. Also, creating the diagonal matrix `D` is avoided.
Args:
S (Tensor): the matrix basis for the search subspace, size is
:math:`(m, n)`.
Returns:
Ri (tensor): upper-triangular transformation matrix of size
:math:`(n, n)`.
"""
B = self.B
mm = torch.matmul
SBS = _utils.qform(B, S)
d_row = SBS.diagonal(0, -2, -1) ** -0.5
d_col = d_row.reshape(d_row.shape[0], 1)
# TODO use torch.linalg.cholesky_solve once it is implemented
R = torch.linalg.cholesky((SBS * d_row) * d_col, upper=True)
return torch.linalg.solve_triangular(
R, d_row.diag_embed(), upper=True, left=False
)
def _get_svqb(
self, U: Tensor, drop: bool, tau: float # Tensor # bool # float
) -> Tensor:
"""Return B-orthonormal U.
.. note:: When `drop` is `False` then `svqb` is based on
Algorithm 4 from [DuerschPhD2015], which is a slight
modification of the corresponding algorithm
introduced in [StathopoulosWu2002].
Args:
U (Tensor) : initial approximation, size is (m, n)
drop (bool) : when True, drop columns whose
contribution to `span([U])` is small.
tau (float) : positive tolerance
Returns:
U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`), size
is (m, n1), where `n1 = n` if `drop` is `False`,
otherwise `n1 <= n`.
"""
if torch.numel(U) == 0:
return U
UBU = _utils.qform(self.B, U)
d = UBU.diagonal(0, -2, -1)
# Detect and drop exact zero columns from U. While the test
# `abs(d) == 0` is unlikely to be True for random data, it is
# possible to construct input data to lobpcg where it will be
# True leading to a failure (notice the `d ** -0.5` operation
# in the original algorithm). To prevent the failure, we drop
# the exact zero columns here and then continue with the
# original algorithm below.
nz = torch.where(abs(d) != 0.0)
assert len(nz) == 1, nz
if len(nz[0]) < len(d):
U = U[:, nz[0]]
if torch.numel(U) == 0:
return U
UBU = _utils.qform(self.B, U)
d = UBU.diagonal(0, -2, -1)
nz = torch.where(abs(d) != 0.0)
assert len(nz[0]) == len(d)
# The original algorithm 4 from [DuerschPhD2015].
d_col = (d**-0.5).reshape(d.shape[0], 1)
DUBUD = (UBU * d_col) * _utils.transpose(d_col)
E, Z = _utils.symeig(DUBUD)
t = tau * abs(E).max()
if drop:
keep = torch.where(E > t)
assert len(keep) == 1, keep
E = E[keep[0]]
Z = Z[:, keep[0]]
d_col = d_col[keep[0]]
else:
E[(torch.where(E < t))[0]] = t
return torch.matmul(U * _utils.transpose(d_col), Z * E**-0.5)
def _get_ortho(self, U, V):
"""Return B-orthonormal U with columns are B-orthogonal to V.
.. note:: When `bparams["ortho_use_drop"] == False` then
`_get_ortho` is based on Algorithm 3 from
[DuerschPhD2015], which is a slight modification of
the corresponding algorithm introduced in
[StathopoulosWu2002]. Otherwise, the method
implements Algorithm 6 from [DuerschPhD2015].
.. note:: If all U columns are B-collinear to V then the
returned tensor U will be empty.
Args:
U (Tensor) : initial approximation, size is (m, n)
V (Tensor) : B-orthogonal external basis, size is (m, k)
Returns:
U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`)
such that :math:`V^T B U=0`, size is (m, n1),
where `n1 = n` if `drop` is `False`, otherwise
`n1 <= n`.
"""
mm = torch.matmul
mm_B = _utils.matmul
m = self.iparams["m"]
tau_ortho = self.fparams["ortho_tol"]
tau_drop = self.fparams["ortho_tol_drop"]
tau_replace = self.fparams["ortho_tol_replace"]
i_max = self.iparams["ortho_i_max"]
j_max = self.iparams["ortho_j_max"]
# when use_drop==True, enable dropping U columns that have
# small contribution to the `span([U, V])`.
use_drop = self.bparams["ortho_use_drop"]
# clean up variables from the previous call
for vkey in list(self.fvars.keys()):
if vkey.startswith("ortho_") and vkey.endswith("_rerr"):
self.fvars.pop(vkey)
self.ivars.pop("ortho_i", 0)
self.ivars.pop("ortho_j", 0)
BV_norm = torch.norm(mm_B(self.B, V))
BU = mm_B(self.B, U)
VBU = mm(_utils.transpose(V), BU)
i = j = 0
stats = ""
for i in range(i_max):
U = U - mm(V, VBU)
drop = False
tau_svqb = tau_drop
for j in range(j_max):
if use_drop:
U = self._get_svqb(U, drop, tau_svqb)
drop = True
tau_svqb = tau_replace
else:
U = self._get_svqb(U, False, tau_replace)
if torch.numel(U) == 0:
# all initial U columns are B-collinear to V
self.ivars["ortho_i"] = i
self.ivars["ortho_j"] = j
return U
BU = mm_B(self.B, U)
UBU = mm(_utils.transpose(U), BU)
U_norm = torch.norm(U)
BU_norm = torch.norm(BU)
R = UBU - torch.eye(UBU.shape[-1], device=UBU.device, dtype=UBU.dtype)
R_norm = torch.norm(R)
# https://github.com/pytorch/pytorch/issues/33810 workaround:
rerr = float(R_norm) * float(BU_norm * U_norm) ** -1
vkey = "ortho_UBUmI_rerr[{}, {}]".format(i, j)
self.fvars[vkey] = rerr
if rerr < tau_ortho:
break
VBU = mm(_utils.transpose(V), BU)
VBU_norm = torch.norm(VBU)
U_norm = torch.norm(U)
rerr = float(VBU_norm) * float(BV_norm * U_norm) ** -1
vkey = "ortho_VBU_rerr[{}]".format(i)
self.fvars[vkey] = rerr
if rerr < tau_ortho:
break
if m < U.shape[-1] + V.shape[-1]:
# TorchScript needs the class var to be assigned to a local to
# do optional type refinement
B = self.B
assert B is not None
raise ValueError(
"Overdetermined shape of U:"
" #B-cols(={}) >= #U-cols(={}) + #V-cols(={}) must hold".format(
B.shape[-1], U.shape[-1], V.shape[-1]
)
)
self.ivars["ortho_i"] = i
self.ivars["ortho_j"] = j
return U
# Calling tracker is separated from LOBPCG definitions because
# TorchScript does not support user-defined callback arguments:
LOBPCG_call_tracker_orig = LOBPCG.call_tracker
def LOBPCG_call_tracker(self):
self.tracker(self)
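# An illustrative tracker callback in the spirit of the `tracker` argument documented
# in `lobpcg` above. The names `_example_tracker` and `_example_lobpcg_with_tracker`
# are placeholders, not part of this module's API.
def _example_tracker(worker):
    # `worker` is the running LOBPCG instance; copy any tensors before storing them.
    istep = worker.ivars["istep"]
    nconv = worker.ivars["converged_count"]
    print(f"step {istep}: {nconv} converged eigenpair(s)")

def _example_lobpcg_with_tracker():
    torch.manual_seed(0)
    A = torch.randn(16, 16)
    A = (A + A.T) / 2 + 16 * torch.eye(16)  # symmetric positive definite
    # The tracker is called once per iteration with the worker as its only argument.
    return lobpcg(A, k=2, tracker=_example_tracker)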
|
pytorch-master
|
torch/_lobpcg.py
|
"""
This global flag controls whether to assign new tensors to the parameters
instead of changing the existing parameters in-place when converting an `nn.Module`
using the following methods:
1. `module.cuda()` / `.cpu()` (for moving `module` between devices)
2. `module.float()` / `.double()` / `.half()` (for converting `module` to a different dtype)
3. `module.to()` / `.type()` (for changing `module`'s device or dtype)
4. `module._apply(fn)` (for generic functions applied to `module`)
Default: False
"""
_overwrite_module_params_on_conversion = False
def set_overwrite_module_params_on_conversion(value):
global _overwrite_module_params_on_conversion
_overwrite_module_params_on_conversion = value
def get_overwrite_module_params_on_conversion():
return _overwrite_module_params_on_conversion
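# A minimal sketch of toggling this flag around a module conversion. Illustrative only:
# the helper name `_example_toggle_overwrite` is a placeholder, not part of this module.
def _example_toggle_overwrite():
    import torch.nn as nn
    previous = get_overwrite_module_params_on_conversion()
    set_overwrite_module_params_on_conversion(True)
    try:
        # With the flag enabled, conversions such as `.double()` assign new parameter
        # tensors instead of changing the existing parameters in-place.
        module = nn.Linear(4, 4).double()
    finally:
        set_overwrite_module_params_on_conversion(previous)
    return module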
|
pytorch-master
|
torch/__future__.py
|
import io
import torch
from torch.package import Importer, OrderedImporter, PackageImporter, sys_importer
from torch.package._package_pickler import create_pickler
from torch.package._package_unpickler import PackageUnpickler
from torch.serialization import _maybe_decode_ascii
def _save_storages(importer, obj):
serialized_storages = []
serialized_dtypes = []
importer = importer if isinstance(importer, torch.package.PackageImporter) else None
importers: Importer
if importer is not None:
importers = OrderedImporter(importer, sys_importer)
else:
importers = sys_importer
def persistent_id(obj):
if torch.is_storage(obj) or isinstance(obj, torch.storage.TypedStorage):
if isinstance(obj, torch.storage.TypedStorage):
# TODO: Once we decide to break serialization FC, we can
# remove this case
storage = obj._storage
dtype = obj.dtype
else:
storage = obj
dtype = torch.uint8
serialized_storages.append(obj)
serialized_dtypes.append(dtype)
return ("storage", len(serialized_storages) - 1)
if hasattr(obj, "__reduce_deploy__"):
if _serialized_reduces.get(id(obj)) is None:
_serialized_reduces[id(obj)] = (
"reduce_deploy",
id(obj),
*obj.__reduce_deploy__(importers),
)
return _serialized_reduces[id(obj)]
return None
# Write the pickle data for `obj`
data_buf = io.BytesIO()
pickler = create_pickler(data_buf, importers)
pickler.persistent_id = persistent_id
pickler.dump(obj)
data_value = data_buf.getvalue()
return (
data_value,
serialized_storages,
serialized_dtypes,
importer.zip_reader if importer else None,
)
def _load_storages(id, zip_reader, obj_bytes, serialized_storages, serialized_dtypes):
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
if typename == "storage":
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with TypedStorage
storage = serialized_storages[data[0]]
dtype = serialized_dtypes[data[0]]
return torch.storage.TypedStorage(
wrap_storage=storage.untyped(), dtype=dtype
)
if typename == "reduce_deploy":
reduce_id, func, args = data
if reduce_id not in _loaded_reduces:
_loaded_reduces[reduce_id] = func(_raw_packages[zip_reader], *args)
return _loaded_reduces[reduce_id]
return None
importer: Importer
if zip_reader is not None:
importer = OrderedImporter(_get_package(zip_reader), sys_importer)
else:
importer = sys_importer
unpickler = PackageUnpickler(importer, io.BytesIO(obj_bytes))
unpickler.persistent_load = persistent_load # type: ignore[assignment]
result = _deploy_objects[id] = unpickler.load()
return result
def _get_package(zip_reader):
if zip_reader not in _raw_packages:
_raw_packages[zip_reader] = PackageImporter(zip_reader)
return _raw_packages[zip_reader]
_raw_packages: dict = {}
_deploy_objects: dict = {}
_serialized_reduces: dict = {}
_loaded_reduces: dict = {}
|
pytorch-master
|
torch/_deploy.py
|
import re
import torch._C as C
"""
The PythonDispatcher class is a thin Python binding to the C++ dispatcher,
designed to show how dispatcher precomputation works. In particular,
it shows, for a certain op `foo`, what the computed dispatch table looks
like after users register their kernels to certain dispatch keys.
In the real C++ dispatcher we support many dispatch keys for different
functionalities. For simplicity PythonDispatcher only supports dispatch
keys for a single example of each use case. These use cases are listed below:
- CPU/AutogradCPU: represents in-tree backends for which we usually have dedicated inference &
autograd kernels in the pytorch core library.
E.g. CPU, CUDA
- FPGA/AutogradOther: represents in-tree backends for which we usually have backend-specific
inference kernels, but which share the same autograd kernel specified in AutogradOther.
E.g. FPGA, SparseCsrCPU
- XLA/AutogradXLA: represents out-of-tree backends for which we have neither inference nor autograd
kernels defined in the pytorch core library. The backend owner is responsible for registering both
inference & autograd kernels in their extensions (e.g. torch-xla) for the operators they support.
E.g. XLA, XPU, MPS
- CompositeExplicitAutograd: alias key mapped to inference kernels of all backends like CPU, CUDA, XLA etc.
Kernels registered to this key MUST work for inference for all backends.
- Autograd: alias key mapped to autograd of all backends like AutogradCPU, AutogradXLA, AutogradOther.
Kernels registered to this key MUST work for autograd for all backends.
- CompositeImplicitAutograd: alias key CompositeImplicitAutograd = CompositeExplicitAutograd + Autograd
Kernels registered to this key MUST work for both inference + autograd for all backends.
Note we only allow registrations to alias keys inside the pytorch core library. E.g.
you shouldn't register a CompositeImplicitAutograd or CompositeExplicitAutograd
kernel from torch-xla extension, instead you should upstream the kernel into
pytorch/pytorch repo so that it's available for all backends and continuously
tested even without the extension.
Usage:
dispatcher = PythonDispatcher()
dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
print(dispatcher.dispatchTable()) # This tells you exactly which kernel is used for certain backend.
# For more debugging information
# print(dispatcher.keys())
# print(dispatcher.registrations())
# print(dispatcher.rawRegistrations())
# print(dispatcher.rawDispatchTable())
PythonDispatcher calls the C++ dispatcher under the hood to precompute the dispatch table.
This file only provides a simplified API for developers; relevant test code is located in
test/test_dispatch.py
"""
class PythonDispatcher:
namespace = "__test__"
name = "foo"
# fmt: off
runtime_keys = [
"CPU", "AutogradCPU",
"FPGA", "AutogradOther",
"XLA", "AutogradXLA",
"Lazy", "AutogradLazy",
]
# fmt: on
alias_keys = [
"CompositeExplicitAutograd",
"Autograd",
"CompositeImplicitAutograd",
]
supported_keys = runtime_keys + alias_keys
def __init__(self):
C._dispatch_check_invariants(self.name) # type: ignore[attr-defined]
self.ref = C._dispatch_library("FRAGMENT", self.namespace, "")
self.ref.def_("foo(Tensor x) -> Tensor")
"""
Returns a list of dispatch keys supported by PythonDispatcher.
You can register kernels to these keys.
"""
def keys(self):
return self.supported_keys
"""
Register kernels to the target dispatchKeys.
dispatchKeys (list[str]): a list of dispatch keys to which you want to register
your own kernel. Note that you don't need to write the kernel yourself in
this PythonDispatcher. E.g. for the CPU key, a kernel (e.g. fn_CPU for CPU) is
automatically generated and registered.
"""
def register(self, dispatchKeys):
# Overriding is not supported and triggers a warning in the C++ dispatcher.
if len(set(dispatchKeys)) != len(dispatchKeys):
raise RuntimeError(
f"Overriden is not allowed but found duplicates in {dispatchKeys}."
)
# We currently forbid this in codegen instead of C++ dispatcher.
if (
"CompositeImplicitAutograd" in dispatchKeys
and "CompositeExplicitAutograd" in dispatchKeys
):
raise RuntimeError(
"Registration to both CompositeImplicitAutograd and CompositeExplicitAutograd is not allowed."
)
for key in dispatchKeys:
if key not in self.supported_keys:
raise RuntimeError(
f"{key} is not supported, please select a dispatch key in {self.supported_keys}."
)
self.ref.impl_t_t("foo", dispatch=key, debug="fn_" + key)
"""
Helper function to format (key, kernel).
"""
def _format_line(self, key, kernel):
return "{:<15} {}\n".format(key, kernel)
"""
Helper function to print a table header.
"""
def _format_header(self, header):
s = f"""
{header}
"""
s += self._format_line("key", "kernel")
s += "---------------------------\n"
return s
"""
Returns raw output of all registration info for debugging only.
Use registrations() for a simplified version.
"""
def rawRegistrations(self):
return C._dispatch_dump("{}::{}".format(self.namespace, self.name)) # type: ignore[attr-defined]
"""
Returns raw output of computed dispatch table for debugging only.
Use dispatchTable() for a simplified version.
"""
def rawDispatchTable(self):
return C._dispatch_dump_table("{}::{}".format(self.namespace, self.name)) # type: ignore[attr-defined]
"""
Returns a table (str) including all the registrations from users.
Note this includes registrations to both runtime keys and alias keys.
"""
def registrations(self):
output = self._format_header("Registered Kernels")
state = self.rawRegistrations()
state_entries = state.split("\n")
for line in state_entries:
first = line.split(":")[0]
if any(first.startswith(k) for k in self.supported_keys):
kernel = line.split("::")[0].split(" ")[1]
output += self._format_line(first, kernel)
return output
"""
Returns the computed dispatch table (str). Note this only includes
runtime keys; registrations to alias keys have been decoded to their
mapped runtime keys.
"""
def dispatchTable(self):
output = self._format_header("Computed Dispatch Table")
table = self.rawDispatchTable()
table_entries = table.split("\n")
regex = re.compile(r"registered at .*FallbackKernel\.cpp.*(\[)")
for line in table_entries:
k = line.split(":")[0]
if k in self.runtime_keys:
entry = regex.sub("[", line)
output += self._format_line(k, entry.split(": ")[1])
return output
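# An illustrative end-to-end use of PythonDispatcher, mirroring the usage sketched in
# the module docstring above. The helper name `_example_dispatcher` is a placeholder.
def _example_dispatcher():
    dispatcher = PythonDispatcher()
    # Register a CPU inference kernel plus a composite (inference + autograd) kernel;
    # the computed table shows which kernel each runtime key resolves to.
    dispatcher.register(["CPU", "CompositeImplicitAutograd"])
    print(dispatcher.registrations())
    print(dispatcher.dispatchTable())
    return dispatcher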
|
pytorch-master
|
torch/_python_dispatcher.py
|
import types
import torch._C
class _ClassNamespace(types.ModuleType):
def __init__(self, name):
super(_ClassNamespace, self).__init__("torch.classes" + name)
self.name = name
def __getattr__(self, attr):
proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
if proxy is None:
raise RuntimeError(f"Class {self.name}.{attr} not registered!")
return proxy
class _Classes(types.ModuleType):
__file__ = "_classes.py"
def __init__(self):
super(_Classes, self).__init__("torch.classes")
def __getattr__(self, name):
namespace = _ClassNamespace(name)
setattr(self, name, namespace)
return namespace
@property
def loaded_libraries(self):
return torch.ops.loaded_libraries
def load_library(self, path):
"""
Loads a shared library from the given path into the current process.
The library being loaded may run global initialization code to register
custom classes with the PyTorch JIT runtime. This allows dynamically
loading custom classes. For this, you should compile your class
and the static registration code into a shared library object, and then
call ``torch.classes.load_library('path/to/libcustom.so')`` to load the
shared object.
After the library is loaded, it is added to the
``torch.classes.loaded_libraries`` attribute, a set that may be inspected
for the paths of all libraries loaded using this function.
Args:
path (str): A path to a shared library to load.
"""
torch.ops.load_library(path)
# The classes "namespace"
classes = _Classes()
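# A hedged sketch of loading a custom-class library and instantiating a registered class
# through the namespace object above. The library path 'build/libcustom.so' and the class
# name `my_namespace.MyStackClass` are hypothetical placeholders; they must match whatever
# your own extension actually registers.
def _example_custom_class():
    import torch
    torch.classes.load_library("build/libcustom.so")            # hypothetical path
    stack = torch.classes.my_namespace.MyStackClass(["hello"])  # hypothetical class
    return stack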
|
pytorch-master
|
torch/_classes.py
|
import difflib
import os
import io
import shutil
import struct
import sys
import torch
import tarfile
import tempfile
import warnings
from contextlib import closing, contextmanager
from ._utils import _import_dotted_name
from ._six import string_classes as _string_classes
from torch._sources import get_source_lines_and_file
from torch.types import Storage
from torch.storage import _get_dtype_from_pickle_storage_type
from typing import Any, BinaryIO, cast, Dict, Optional, Type, Tuple, Union, IO
import copyreg
import pickle
import pathlib
DEFAULT_PROTOCOL = 2
LONG_SIZE = struct.Struct('=l').size
INT_SIZE = struct.Struct('=i').size
SHORT_SIZE = struct.Struct('=h').size
MAGIC_NUMBER = 0x1950a86a20f9469cfc6c
PROTOCOL_VERSION = 1001
STORAGE_KEY_SEPARATOR = ','
class SourceChangeWarning(Warning):
pass
@contextmanager
def mkdtemp():
path = tempfile.mkdtemp()
yield path
shutil.rmtree(path)
_package_registry = []
def _is_zipfile(f) -> bool:
# This is a stricter implementation than zipfile.is_zipfile().
# zipfile.is_zipfile() is True if the magic number appears anywhere in the
# binary. Since we expect the files here to be generated by torch.save or
# torch.jit.save, it's safe to only check the start bytes and avoid
# collisions and assume the zip has only 1 file.
# See bugs.python.org/issue28494.
# Read the first 4 bytes of the file
read_bytes = []
start = f.tell()
byte = f.read(1)
while byte != b"":
read_bytes.append(byte)
if len(read_bytes) == 4:
break
byte = f.read(1)
f.seek(start)
local_header_magic_number = [b'P', b'K', b'\x03', b'\x04']
return read_bytes == local_header_magic_number
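# A small illustrative check of the zip-format sniffing above, using an in-memory buffer
# produced by torch.save (which defaults to the zipfile-based format). The helper name
# `_example_is_zipfile` is a placeholder, not part of this module.
def _example_is_zipfile():
    buffer = io.BytesIO()
    torch.save(torch.zeros(2), buffer)
    buffer.seek(0)
    return _is_zipfile(buffer)  # True for the new zipfile-based format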
def register_package(priority, tagger, deserializer):
queue_elem = (priority, tagger, deserializer)
_package_registry.append(queue_elem)
_package_registry.sort()
def check_module_version_greater_or_equal(module, req_version_tuple, error_if_malformed=True):
'''
Check if a module's version satisfies requirements
Usually, a module's version string will be like 'x.y.z', which would be represented
as a tuple (x, y, z), but sometimes it could be an unexpected format. If the version
string does not match the given tuple's format up to the length of the tuple, then
error and exit or emit a warning.
Args:
module: the module to check the version of
req_version_tuple: tuple (usually of ints) representing the required version
error_if_malformed: whether we should exit if module version string is malformed
Returns:
requirement_is_met: bool
'''
try:
version_strs = module.__version__.split('.')
# Cast module version fields to match the types of the required version
module_version = tuple(
type(req_field)(version_strs[idx]) for idx, req_field in enumerate(req_version_tuple)
)
requirement_is_met = module_version >= req_version_tuple
except Exception as e:
message = (
"'%s' module version string is malformed '%s' and cannot be compared"
" with tuple %s"
) % (
module.__name__, module.__version__, str(req_version_tuple)
)
if error_if_malformed:
raise RuntimeError(message) from e
else:
warnings.warn(message + ', but continuing assuming that requirement is met')
requirement_is_met = True
return requirement_is_met
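# A small usage sketch for the version check above. Illustrative only: checking `torch`
# against (1, 0) is an arbitrary example, and `_example_version_check` is a placeholder name.
def _example_version_check():
    # True for any torch build whose version string starts with a major.minor pair >= 1.0.
    return check_module_version_greater_or_equal(torch, (1, 0), error_if_malformed=False)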
def _cpu_tag(obj):
if obj.device.type == 'cpu':
return 'cpu'
def _cuda_tag(obj):
if obj.device.type == 'cuda':
return 'cuda:' + str(obj.device.index)
def _mps_tag(obj):
if obj.device.type == 'mps':
return 'mps'
def _meta_tag(obj):
if obj.device.type == 'meta':
return 'meta'
def _cpu_deserialize(obj, location):
if location == 'cpu':
return obj
def validate_cuda_device(location):
device = torch.cuda._utils._get_device_index(location, True)
if not torch.cuda.is_available():
raise RuntimeError('Attempting to deserialize object on a CUDA '
'device but torch.cuda.is_available() is False. '
'If you are running on a CPU-only machine, '
'please use torch.load with map_location=torch.device(\'cpu\') '
'to map your storages to the CPU.')
device_count = torch.cuda.device_count()
if device >= device_count:
raise RuntimeError('Attempting to deserialize object on CUDA device '
f'{device} but torch.cuda.device_count() is {device_count}. Please use '
'torch.load with map_location to map your storages '
'to an existing device.')
return device
def _cuda_deserialize(obj, location):
if location.startswith('cuda'):
device = validate_cuda_device(location)
if getattr(obj, "_torch_load_uninitialized", False):
with torch.cuda.device(device):
return torch.UntypedStorage(obj.nbytes(), device=torch.device(location))
else:
return obj.cuda(device)
def _mps_deserialize(obj, location):
if location == 'mps':
return obj.mps()
def _meta_deserialize(obj, location):
if location == 'meta':
return torch.UntypedStorage(obj.nbytes(), device='meta')
register_package(10, _cpu_tag, _cpu_deserialize)
register_package(20, _cuda_tag, _cuda_deserialize)
register_package(21, _mps_tag, _mps_deserialize)
register_package(22, _meta_tag, _meta_deserialize)
def location_tag(storage: Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage]):
for _, tagger, _ in _package_registry:
location = tagger(storage)
if location:
return location
raise RuntimeError("don't know how to determine data location of "
+ torch.typename(storage))
def default_restore_location(storage, location):
for _, _, fn in _package_registry:
result = fn(storage, location)
if result is not None:
return result
raise RuntimeError("don't know how to restore data location of "
+ torch.typename(storage) + " (tagged with "
+ location + ")")
def normalize_storage_type(storage_type):
return getattr(torch, storage_type.__name__)
def storage_to_tensor_type(storage):
storage_type = type(storage)
module = _import_dotted_name(storage_type.__module__)
return getattr(module, storage_type.__name__.replace('Storage', 'Tensor'))
def _is_path(name_or_buffer):
return isinstance(name_or_buffer, str) or \
isinstance(name_or_buffer, pathlib.Path)
class _opener(object):
def __init__(self, file_like):
self.file_like = file_like
def __enter__(self):
return self.file_like
def __exit__(self, *args):
pass
class _open_file(_opener):
def __init__(self, name, mode):
super(_open_file, self).__init__(open(name, mode))
def __exit__(self, *args):
self.file_like.close()
class _open_buffer_reader(_opener):
def __init__(self, buffer):
super(_open_buffer_reader, self).__init__(buffer)
_check_seekable(buffer)
class _open_buffer_writer(_opener):
def __exit__(self, *args):
self.file_like.flush()
def _open_file_like(name_or_buffer, mode):
if _is_path(name_or_buffer):
return _open_file(name_or_buffer, mode)
else:
if 'w' in mode:
return _open_buffer_writer(name_or_buffer)
elif 'r' in mode:
return _open_buffer_reader(name_or_buffer)
else:
raise RuntimeError(f"Expected 'r' or 'w' in mode but got {mode}")
class _open_zipfile_reader(_opener):
def __init__(self, name_or_buffer) -> None:
super(_open_zipfile_reader, self).__init__(torch._C.PyTorchFileReader(name_or_buffer))
class _open_zipfile_writer_file(_opener):
def __init__(self, name) -> None:
super(_open_zipfile_writer_file, self).__init__(torch._C.PyTorchFileWriter(str(name)))
def __exit__(self, *args) -> None:
self.file_like.write_end_of_file()
class _open_zipfile_writer_buffer(_opener):
def __init__(self, buffer) -> None:
self.buffer = buffer
super(_open_zipfile_writer_buffer, self).__init__(torch._C.PyTorchFileWriter(buffer))
def __exit__(self, *args) -> None:
self.file_like.write_end_of_file()
self.buffer.flush()
def _open_zipfile_writer(name_or_buffer):
container: Type[_opener]
if _is_path(name_or_buffer):
container = _open_zipfile_writer_file
else:
container = _open_zipfile_writer_buffer
return container(name_or_buffer)
def _is_compressed_file(f) -> bool:
compress_modules = ['gzip']
try:
return f.__module__ in compress_modules
except AttributeError:
return False
def _should_read_directly(f):
"""
Checks if f is a file that should be read directly. It should be read
directly if it is backed by a real file (has a fileno) and is not
a compressed file (e.g. gzip).
"""
if _is_compressed_file(f):
return False
try:
return f.fileno() >= 0
except io.UnsupportedOperation:
return False
except AttributeError:
return False
def _check_seekable(f) -> bool:
def raise_err_msg(patterns, e):
for p in patterns:
if p in str(e):
msg = (str(e) + ". You can only torch.load from a file that is seekable."
+ " Please pre-load the data into a buffer like io.BytesIO and"
+ " try to load from it instead.")
raise type(e)(msg)
raise e
try:
f.seek(f.tell())
return True
except (io.UnsupportedOperation, AttributeError) as e:
raise_err_msg(["seek", "tell"], e)
return False
def _check_dill_version(pickle_module) -> None:
'''Checks if using dill as the pickle module, and if so, checks if it is the correct version.
If dill version is lower than 0.3.1, a ValueError is raised.
Args:
pickle_module: module used for pickling metadata and objects
'''
if pickle_module.__name__ == 'dill':
required_dill_version = (0, 3, 1)
if not check_module_version_greater_or_equal(pickle_module, required_dill_version, False):
raise ValueError((
"'torch' supports dill >= %s, but you have dill %s."
" Please upgrade dill or switch to 'pickle'"
) % (
'.'.join([str(num) for num in required_dill_version]),
pickle_module.__version__
))
def save(obj, f: Union[str, os.PathLike, BinaryIO, IO[bytes]],
pickle_module=pickle, pickle_protocol=DEFAULT_PROTOCOL, _use_new_zipfile_serialization=True) -> None:
# Reference: https://github.com/pytorch/pytorch/issues/54354
# The first line of this docstring overrides the one Sphinx generates for the
# documentation. We need it so that Sphinx doesn't leak `pickle`s path from
    # the build environment (e.g. `<module 'pickle' from '/leaked/path'>`).
"""save(obj, f, pickle_module=pickle, pickle_protocol=DEFAULT_PROTOCOL, _use_new_zipfile_serialization=True)
Saves an object to a disk file.
See also: :ref:`saving-loading-tensors`
Args:
obj: saved object
f: a file-like object (has to implement write and flush) or a string or
os.PathLike object containing a file name
pickle_module: module used for pickling metadata and objects
pickle_protocol: can be specified to override the default protocol
.. note::
A common PyTorch convention is to save tensors using .pt file extension.
.. note::
PyTorch preserves storage sharing across serialization. See
:ref:`preserve-storage-sharing` for more details.
.. note::
The 1.6 release of PyTorch switched ``torch.save`` to use a new
zipfile-based file format. ``torch.load`` still retains the ability to
load files in the old format. If for any reason you want ``torch.save``
to use the old format, pass the kwarg ``_use_new_zipfile_serialization=False``.
Example:
>>> # Save to file
>>> x = torch.tensor([0, 1, 2, 3, 4])
>>> torch.save(x, 'tensor.pt')
>>> # Save to io.BytesIO buffer
>>> buffer = io.BytesIO()
>>> torch.save(x, buffer)
"""
_check_dill_version(pickle_module)
if _use_new_zipfile_serialization:
with _open_zipfile_writer(f) as opened_zipfile:
_save(obj, opened_zipfile, pickle_module, pickle_protocol)
return
else:
with _open_file_like(f, 'wb') as opened_file:
_legacy_save(obj, opened_file, pickle_module, pickle_protocol)
def _legacy_save(obj, f, pickle_module, pickle_protocol) -> None:
import torch.nn as nn
serialized_container_types = {}
serialized_storages = {}
# Since loading storages that view the same data with different dtypes is
# not supported, we need to keep track of the dtype associated with each
# storage data_ptr and throw an error if the dtype is ever different.
# TODO: This feature could be added in the future
storage_dtypes: Dict[int, torch.dtype] = {}
def persistent_id(obj: Any) -> Optional[Tuple]:
# FIXME: the docs say that persistent_id should only return a string
# but torch store returns tuples. This works only in the binary protocol
# see
# https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
# https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
if isinstance(obj, type) and issubclass(obj, nn.Module):
if obj in serialized_container_types:
return None
serialized_container_types[obj] = True
source_file = source = None
try:
source_lines, _, source_file = get_source_lines_and_file(obj)
source = ''.join(source_lines)
except Exception: # saving the source is optional, so we can ignore any errors
warnings.warn("Couldn't retrieve source code for container of "
"type " + obj.__name__ + ". It won't be checked "
"for correctness upon loading.")
return ('module', obj, source_file, source)
if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj):
storage: torch.UntypedStorage
if isinstance(obj, torch.storage.TypedStorage):
# TODO: Once we decide to break serialization FC, this case
# can be deleted
storage = obj._storage
storage_dtype = obj.dtype
storage_type_str = obj.pickle_storage_type()
storage_type = getattr(torch, storage_type_str)
dtype = obj.dtype
storage_numel = obj.size()
elif isinstance(obj, torch.UntypedStorage):
storage = obj
storage_dtype = torch.uint8
storage_type = normalize_storage_type(type(obj))
dtype = torch.uint8
storage_numel = storage.nbytes()
else:
raise TypeError(f'type not recognized: {type(obj)}')
# If storage is allocated, ensure that any other saved storages
# pointing to the same data all have the same dtype. If storage is
# not allocated, don't perform this check
if storage.data_ptr() != 0:
if storage.data_ptr() in storage_dtypes:
if storage_dtype != storage_dtypes[storage.data_ptr()]:
raise RuntimeError(
'Cannot save multiple tensors or storages that '
'view the same data as different types')
else:
storage_dtypes[storage.data_ptr()] = storage_dtype
view_metadata: Optional[Tuple[str, int, int]]
# Offset is always 0, but we keep it for backwards compatibility
# with the old serialization format (which supported storage views)
offset = 0
storage_key = str(storage._cdata)
location = location_tag(storage)
# TODO: There's an issue here with FC. It might be impossible to
# solve, but it's worth noting. Imagine we save a list `[storage,
# tensor]`, where `tensor.storage()` is the same as `storage`, and
# `tensor.element_size() > 1`. Let's say that `tensor.dtype ==
# torch.float`. The storage will be serialized with element size
            # of 1, since we're choosing to serialize the first occurrence of
# a duplicate storage. Since this legacy serialization format saves
# the numel of the storage, rather than nbytes directly, we'll be
# effectively saving nbytes in this case. We'll be able to load it
# and the tensor back up with no problems in _this_ and future
# versions of pytorch, but in older versions, here's the problem:
            # the storage will be loaded up as an UntypedStorage, and then the
            # FloatTensor will be loaded and the UntypedStorage will be assigned to
# it. Since the storage dtype does not match the tensor dtype, this
# will cause an error. If we reverse the list, like `[tensor,
# storage]`, then we will save the `tensor.storage()` as a faked
# `FloatStorage`, and the saved size will be the correct
# dtype-specific numel count that old versions expect. `tensor`
# will be able to load up properly in old versions, pointing to
# a FloatStorage. However, `storage` is still being translated to
            # an UntypedStorage, and it will try to resolve to the same
# FloatStorage that `tensor` contains. This will also cause an
# error. It doesn't seem like there's any way around this.
# Probably, we just cannot maintain FC for the legacy format if the
# saved list contains both a tensor and a storage that point to the
# same data. We should still be able to maintain FC for lists of
# just tensors, as long as all views share the same dtype as the
# tensor they are viewing.
if storage_key not in serialized_storages:
serialized_storages[storage_key] = (storage, dtype)
            # Storage views are no longer produced here, so this comparison is
            # always False and view_metadata below is always None.
            is_view = storage._cdata != storage._cdata
if is_view:
view_metadata = (str(storage._cdata), offset, storage.nbytes())
else:
view_metadata = None
res = ('storage',
storage_type,
storage_key,
location,
storage_numel,
view_metadata)
return res
return None
sys_info = dict(
protocol_version=PROTOCOL_VERSION,
little_endian=sys.byteorder == 'little',
type_sizes=dict(
short=SHORT_SIZE,
int=INT_SIZE,
long=LONG_SIZE,
),
)
pickle_module.dump(MAGIC_NUMBER, f, protocol=pickle_protocol)
pickle_module.dump(PROTOCOL_VERSION, f, protocol=pickle_protocol)
pickle_module.dump(sys_info, f, protocol=pickle_protocol)
pickler = pickle_module.Pickler(f, protocol=pickle_protocol)
pickler.persistent_id = persistent_id
pickler.dump(obj)
serialized_storage_keys = sorted(serialized_storages.keys())
pickle_module.dump(serialized_storage_keys, f, protocol=pickle_protocol)
f.flush()
for key in serialized_storage_keys:
storage, dtype = serialized_storages[key]
storage._write_file(f, _should_read_directly(f), True, torch._utils._element_size(dtype))
def _save(obj, zip_file, pickle_module, pickle_protocol):
serialized_storages = {}
id_map: Dict[int, str] = {}
# Since loading storages that view the same data with different dtypes is
# not supported, we need to keep track of the dtype associated with each
# storage data_ptr and throw an error if the dtype is ever different.
# TODO: This feature could be added in the future
storage_dtypes: Dict[int, torch.dtype] = {}
def persistent_id(obj):
# FIXME: the docs say that persistent_id should only return a string
# but torch store returns tuples. This works only in the binary protocol
# see
# https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
# https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj):
if isinstance(obj, torch.storage.TypedStorage):
# TODO: Once we decide to break serialization FC, this case
# can be deleted
storage = obj._storage
storage_dtype = obj.dtype
storage_type_str = obj.pickle_storage_type()
storage_type = getattr(torch, storage_type_str)
storage_numel = obj.size()
else:
storage = obj
storage_dtype = torch.uint8
storage_type = normalize_storage_type(type(obj))
storage_numel = storage.nbytes()
# If storage is allocated, ensure that any other saved storages
# pointing to the same data all have the same dtype. If storage is
# not allocated, don't perform this check
if storage.data_ptr() != 0:
if storage.data_ptr() in storage_dtypes:
if storage_dtype != storage_dtypes[storage.data_ptr()]:
raise RuntimeError(
'Cannot save multiple tensors or storages that '
'view the same data as different types')
else:
storage_dtypes[storage.data_ptr()] = storage_dtype
storage_key = id_map.setdefault(storage._cdata, str(len(id_map)))
location = location_tag(storage)
serialized_storages[storage_key] = storage
return ('storage',
storage_type,
storage_key,
location,
storage_numel)
return None
# Write the pickle data for `obj`
data_buf = io.BytesIO()
pickler = pickle_module.Pickler(data_buf, protocol=pickle_protocol)
pickler.persistent_id = persistent_id
pickler.dump(obj)
data_value = data_buf.getvalue()
zip_file.write_record('data.pkl', data_value, len(data_value))
# Write each tensor to a file named tensor/the_tensor_key in the zip archive
for key in sorted(serialized_storages.keys()):
name = f'data/{key}'
storage = serialized_storages[key]
# given that we copy things around anyway, we might use storage.cpu()
        # this means that to get tensors serialized, you need to implement
# .cpu() on the underlying Storage
if storage.device.type != 'cpu':
storage = storage.cpu()
# Now that it is on the CPU we can directly copy it into the zip file
num_bytes = storage.nbytes()
zip_file.write_record(name, storage.data_ptr(), num_bytes)
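# A minimal usage sketch, not part of the module itself: _save above writes a
# 'data.pkl' pickle record plus one 'data/<key>' record per storage into the
# zip container, so a checkpoint produced with the zipfile serialization can be
# listed with the stdlib zipfile module. The helper name and the exact record
# name prefixes inside the archive are illustrative assumptions only.
def _example_list_zipfile_checkpoint_records():
    import io
    import zipfile

    import torch

    buf = io.BytesIO()
    torch.save({"w": torch.randn(3, 4)}, buf)  # routed through _save above
    buf.seek(0)
    with zipfile.ZipFile(buf) as zf:
        # Expect one entry ending in 'data.pkl' and one 'data/<key>' entry per storage.
        return zf.namelist()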
def load(f, map_location=None, pickle_module=pickle, **pickle_load_args):
# Reference: https://github.com/pytorch/pytorch/issues/54354
# The first line of this docstring overrides the one Sphinx generates for the
# documentation. We need it so that Sphinx doesn't leak `pickle`s path from
    # the build environment (e.g. `<module 'pickle' from '/leaked/path'>`).
"""load(f, map_location=None, pickle_module=pickle, **pickle_load_args)
Loads an object saved with :func:`torch.save` from a file.
:func:`torch.load` uses Python's unpickling facilities but treats storages,
which underlie tensors, specially. They are first deserialized on the
CPU and are then moved to the device they were saved from. If this fails
(e.g. because the run time system doesn't have certain devices), an exception
is raised. However, storages can be dynamically remapped to an alternative
set of devices using the :attr:`map_location` argument.
If :attr:`map_location` is a callable, it will be called once for each serialized
storage with two arguments: storage and location. The storage argument
will be the initial deserialization of the storage, residing on the CPU.
Each serialized storage has a location tag associated with it which
identifies the device it was saved from, and this tag is the second
argument passed to :attr:`map_location`. The builtin location tags are ``'cpu'``
for CPU tensors and ``'cuda:device_id'`` (e.g. ``'cuda:2'``) for CUDA tensors.
:attr:`map_location` should return either ``None`` or a storage. If
:attr:`map_location` returns a storage, it will be used as the final deserialized
object, already moved to the right device. Otherwise, :func:`torch.load` will
fall back to the default behavior, as if :attr:`map_location` wasn't specified.
If :attr:`map_location` is a :class:`torch.device` object or a string containing
a device tag, it indicates the location where all tensors should be loaded.
Otherwise, if :attr:`map_location` is a dict, it will be used to remap location tags
appearing in the file (keys), to ones that specify where to put the
storages (values).
User extensions can register their own location tags and tagging and
deserialization methods using :func:`torch.serialization.register_package`.
Args:
f: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`),
or a string or os.PathLike object containing a file name
map_location: a function, :class:`torch.device`, string or a dict specifying how to remap storage
locations
pickle_module: module used for unpickling metadata and objects (has to
match the :attr:`pickle_module` used to serialize file)
pickle_load_args: (Python 3 only) optional keyword arguments passed over to
:func:`pickle_module.load` and :func:`pickle_module.Unpickler`, e.g.,
:attr:`errors=...`.
.. warning::
:func:`torch.load()` uses ``pickle`` module implicitly, which is known to be insecure.
It is possible to construct malicious pickle data which will execute arbitrary code
during unpickling. Never load data that could have come from an untrusted
source, or that could have been tampered with. **Only load data you trust**.
.. note::
When you call :func:`torch.load()` on a file which contains GPU tensors, those tensors
will be loaded to GPU by default. You can call ``torch.load(.., map_location='cpu')``
and then :meth:`load_state_dict` to avoid GPU RAM surge when loading a model checkpoint.
.. note::
By default, we decode byte strings as ``utf-8``. This is to avoid a common error
case ``UnicodeDecodeError: 'ascii' codec can't decode byte 0x...``
when loading files saved by Python 2 in Python 3. If this default
is incorrect, you may use an extra :attr:`encoding` keyword argument to specify how
these objects should be loaded, e.g., :attr:`encoding='latin1'` decodes them
to strings using ``latin1`` encoding, and :attr:`encoding='bytes'` keeps them
as byte arrays which can be decoded later with ``byte_array.decode(...)``.
Example:
>>> # xdoctest: +SKIP("undefined filepaths")
>>> torch.load('tensors.pt')
# Load all tensors onto the CPU
>>> torch.load('tensors.pt', map_location=torch.device('cpu'))
# Load all tensors onto the CPU, using a function
>>> torch.load('tensors.pt', map_location=lambda storage, loc: storage)
# Load all tensors onto GPU 1
>>> torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1))
# Map tensors from GPU 1 to GPU 0
>>> torch.load('tensors.pt', map_location={'cuda:1':'cuda:0'})
# Load tensor from io.BytesIO object
>>> with open('tensor.pt', 'rb') as f:
... buffer = io.BytesIO(f.read())
>>> torch.load(buffer)
# Load a module with 'ascii' encoding for unpickling
>>> torch.load('module.pt', encoding='ascii')
"""
_check_dill_version(pickle_module)
if 'encoding' not in pickle_load_args.keys():
pickle_load_args['encoding'] = 'utf-8'
with _open_file_like(f, 'rb') as opened_file:
if _is_zipfile(opened_file):
# The zipfile reader is going to advance the current file position.
# If we want to actually tail call to torch.jit.load, we need to
# reset back to the original position.
orig_position = opened_file.tell()
with _open_zipfile_reader(opened_file) as opened_zipfile:
if _is_torchscript_zip(opened_zipfile):
warnings.warn("'torch.load' received a zip file that looks like a TorchScript archive"
" dispatching to 'torch.jit.load' (call 'torch.jit.load' directly to"
" silence this warning)", UserWarning)
opened_file.seek(orig_position)
return torch.jit.load(opened_file, map_location=map_location)
return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
# Register pickling support for layout instances such as
# torch.sparse_coo, etc
def _get_layout(name):
"""Get layout extension object from its string representation.
"""
cache = _get_layout.cache # type: ignore[attr-defined]
if not cache:
for v in torch.__dict__.values():
if isinstance(v, torch.layout):
cache[str(v)] = v
return cache[name]
# There is not yet a good way to type annotate function attributes https://github.com/python/mypy/issues/2087
_get_layout.cache = {} # type: ignore[attr-defined]
copyreg.pickle(torch.layout, lambda obj: (_get_layout, (str(obj),)))
def _legacy_load(f, map_location, pickle_module, **pickle_load_args):
deserialized_objects: Dict[int, Any] = {}
restore_location = _get_restore_location(map_location)
class UnpicklerWrapper(pickle_module.Unpickler): # type: ignore[name-defined]
def find_class(self, mod_name, name):
if type(name) is str and 'Storage' in name:
try:
return StorageType(name)
except KeyError:
pass
return super().find_class(mod_name, name)
def _check_container_source(container_type, source_file, original_source):
try:
current_source = ''.join(get_source_lines_and_file(container_type)[0])
except Exception: # saving the source is optional, so we can ignore any errors
warnings.warn("Couldn't retrieve source code for container of "
"type " + container_type.__name__ + ". It won't be checked "
"for correctness upon loading.")
return
if original_source != current_source:
if container_type.dump_patches:
file_name = container_type.__name__ + '.patch'
diff = difflib.unified_diff(current_source.split('\n'),
original_source.split('\n'),
source_file,
source_file, lineterm="")
lines = '\n'.join(diff)
try:
with open(file_name, 'a+') as f:
file_size = f.seek(0, 2)
f.seek(0)
if file_size == 0:
f.write(lines)
elif file_size != len(lines) or f.read() != lines:
raise IOError
msg = ("Saved a reverse patch to " + file_name + ". "
"Run `patch -p0 < " + file_name + "` to revert your "
"changes.")
except IOError:
msg = ("Tried to save a patch, but couldn't create a "
"writable file " + file_name + ". Make sure it "
"doesn't exist and your working directory is "
"writable.")
else:
msg = ("you can retrieve the original source code by "
"accessing the object's source attribute or set "
"`torch.nn.Module.dump_patches = True` and use the "
"patch tool to revert the changes.")
msg = f"source code of class '{torch.typename(container_type)}' has changed. {msg}"
warnings.warn(msg, SourceChangeWarning)
def legacy_load(f):
deserialized_objects: Dict[int, Any] = {}
def persistent_load(saved_id):
if isinstance(saved_id, tuple):
# Ignore containers that don't have any sources saved
if all(saved_id[1:]):
_check_container_source(*saved_id)
return saved_id[0]
return deserialized_objects[int(saved_id)]
with closing(tarfile.open(fileobj=f, mode='r:', format=tarfile.PAX_FORMAT)) as tar, \
mkdtemp() as tmpdir:
tar.extract('storages', path=tmpdir)
with open(os.path.join(tmpdir, 'storages'), 'rb', 0) as f:
num_storages = pickle_module.load(f, **pickle_load_args)
for i in range(num_storages):
args = pickle_module.load(f, **pickle_load_args)
key, location, storage_type = args
dtype = storage_type.dtype
obj = cast(Storage, torch.UntypedStorage)._new_with_file(f, torch._utils._element_size(dtype))
obj = restore_location(obj, location)
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with TypedStorage
deserialized_objects[key] = torch.storage.TypedStorage(
wrap_storage=obj,
dtype=dtype)
storage_views = pickle_module.load(f, **pickle_load_args)
for target_cdata, root_cdata, offset, numel in storage_views:
root = deserialized_objects[root_cdata]
element_size = torch._utils._element_size(root.dtype)
offset_bytes = offset * element_size
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with TypedStorage
deserialized_objects[target_cdata] = torch.storage.TypedStorage(
wrap_storage=root._storage[offset_bytes:offset_bytes + numel * element_size],
dtype=root.dtype)
tar.extract('tensors', path=tmpdir)
with open(os.path.join(tmpdir, 'tensors'), 'rb', 0) as f:
num_tensors = pickle_module.load(f, **pickle_load_args)
for _ in range(num_tensors):
args = pickle_module.load(f, **pickle_load_args)
key, storage_id, original_tensor_type = args
storage = deserialized_objects[storage_id]
ndim, = struct.unpack('<i', f.read(4))
# skip next 4 bytes; legacy encoding treated ndim as 8 bytes
f.read(4)
numel = struct.unpack(f'<{ndim}q', f.read(8 * ndim))
stride = struct.unpack(f'<{ndim}q', f.read(8 * ndim))
storage_offset, = struct.unpack('<q', f.read(8))
tensor = torch.tensor([], dtype=storage.dtype).set_(
storage._storage, storage_offset, numel, stride)
deserialized_objects[key] = tensor
pickle_file = tar.extractfile('pickle')
unpickler = UnpicklerWrapper(pickle_file, **pickle_load_args)
unpickler.persistent_load = persistent_load
result = unpickler.load()
return result
deserialized_objects = {}
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
if typename == 'module':
# Ignore containers that don't have any sources saved
if all(data[1:]):
_check_container_source(*data)
return data[0]
elif typename == 'storage':
storage_type, root_key, location, numel, view_metadata = data
location = _maybe_decode_ascii(location)
dtype = storage_type.dtype
nbytes = numel * torch._utils._element_size(dtype)
if root_key not in deserialized_objects:
obj = cast(Storage, torch.UntypedStorage(nbytes))
obj._torch_load_uninitialized = True
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with TypedStorage
deserialized_objects[root_key] = torch.storage.TypedStorage(
wrap_storage=restore_location(obj, location),
dtype=dtype)
typed_storage = deserialized_objects[root_key]
if view_metadata is not None:
view_key, offset, view_size = view_metadata
offset_bytes = offset * torch._utils._element_size(dtype)
view_size_bytes = view_size * torch._utils._element_size(dtype)
if view_key not in deserialized_objects:
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with TypedStorage
deserialized_objects[view_key] = torch.storage.TypedStorage(
wrap_storage=typed_storage._storage[offset_bytes:offset_bytes + view_size_bytes],
dtype=dtype)
res = deserialized_objects[view_key]
else:
res = typed_storage
return res
else:
raise RuntimeError("Unknown saved id type: %s" % saved_id[0])
_check_seekable(f)
f_should_read_directly = _should_read_directly(f)
if f_should_read_directly and f.tell() == 0:
# legacy_load requires that f has fileno()
# only if offset is zero we can attempt the legacy tar file loader
try:
return legacy_load(f)
except tarfile.TarError:
if _is_zipfile(f):
# .zip is used for torch.jit.save and will throw an un-pickling error here
raise RuntimeError(
f"{f.name} is a zip archive (did you mean to use torch.jit.load()?)") from None
# if not a tarfile, reset file offset and proceed
f.seek(0)
if not hasattr(f, 'readinto') and (3, 8, 0) <= sys.version_info < (3, 8, 2):
raise RuntimeError(
"torch.load does not work with file-like objects that do not implement readinto on Python 3.8.0 and 3.8.1. "
f"Received object of type \"{type(f)}\". Please update to Python 3.8.2 or newer to restore this "
"functionality.")
magic_number = pickle_module.load(f, **pickle_load_args)
if magic_number != MAGIC_NUMBER:
raise RuntimeError("Invalid magic number; corrupt file?")
protocol_version = pickle_module.load(f, **pickle_load_args)
if protocol_version != PROTOCOL_VERSION:
raise RuntimeError("Invalid protocol version: %s" % protocol_version)
_sys_info = pickle_module.load(f, **pickle_load_args)
unpickler = UnpicklerWrapper(f, **pickle_load_args)
unpickler.persistent_load = persistent_load
result = unpickler.load()
deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
offset = f.tell() if f_should_read_directly else None
for key in deserialized_storage_keys:
assert key in deserialized_objects
typed_storage = deserialized_objects[key]
typed_storage._storage._set_from_file(
f, offset, f_should_read_directly,
torch._utils._element_size(typed_storage.dtype))
if offset is not None:
offset = f.tell()
torch._utils._validate_loaded_sparse_tensors()
return result
def _maybe_decode_ascii(bytes_str: Union[bytes, str]) -> str:
# When using encoding='bytes' in Py3, some **internal** keys stored as
# strings in Py2 are loaded as bytes. This function decodes them with
# ascii encoding, one that Py3 uses by default.
#
    # NOTE: This should only be used on internal keys (e.g., `typename` and
    # `location` in `persistent_load` below!)
if isinstance(bytes_str, bytes):
return bytes_str.decode('ascii')
return bytes_str
def _get_restore_location(map_location):
if map_location is None:
restore_location = default_restore_location
elif isinstance(map_location, dict):
def restore_location(storage, location):
location = map_location.get(location, location)
return default_restore_location(storage, location)
elif isinstance(map_location, _string_classes):
def restore_location(storage, location):
return default_restore_location(storage, map_location)
elif isinstance(map_location, torch.device):
def restore_location(storage, location):
return default_restore_location(storage, str(map_location))
else:
def restore_location(storage, location):
result = map_location(storage, location)
if result is None:
result = default_restore_location(storage, location)
return result
return restore_location
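# A minimal sketch of the callable form dispatched above: the callable receives
# every deserialized (CPU) storage together with its serialized location tag and
# may return None to fall back to default_restore_location for that storage.
# 'checkpoint.pt' is a placeholder path, and the helper name is illustrative.
def _example_selective_map_location():
    import torch

    def keep_cuda0_rest_on_cpu(storage, location):
        if location.startswith("cuda:0"):
            return None   # fall back to the default restore behaviour for this tag
        return storage    # storage is already on CPU at this point; keep it there

    return torch.load("checkpoint.pt", map_location=keep_cuda0_rest_on_cpu)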
class StorageType():
def __init__(self, name):
self.dtype = _get_dtype_from_pickle_storage_type(name)
def __str__(self):
return f'StorageType(dtype={self.dtype})'
def _load(zip_file, map_location, pickle_module, pickle_file='data.pkl', **pickle_load_args):
restore_location = _get_restore_location(map_location)
loaded_storages = {}
def load_tensor(dtype, numel, key, location):
name = f'data/{key}'
storage = zip_file.get_storage_from_record(name, numel, torch.UntypedStorage).storage().untyped()
# TODO: Once we decide to break serialization FC, we can
# stop wrapping with TypedStorage
loaded_storages[key] = torch.storage.TypedStorage(
wrap_storage=restore_location(storage, location),
dtype=dtype)
def persistent_load(saved_id):
assert isinstance(saved_id, tuple)
typename = _maybe_decode_ascii(saved_id[0])
data = saved_id[1:]
assert typename == 'storage', \
f"Unknown typename for persistent_load, expected 'storage' but got '{typename}'"
storage_type, key, location, numel = data
if storage_type is torch.UntypedStorage:
dtype = torch.uint8
else:
dtype = storage_type.dtype
if key not in loaded_storages:
nbytes = numel * torch._utils._element_size(dtype)
load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location))
return loaded_storages[key]
load_module_mapping: Dict[str, str] = {
# See https://github.com/pytorch/pytorch/pull/51633
'torch.tensor': 'torch._tensor'
}
# Need to subclass Unpickler instead of directly monkey-patching the find_class method
# because it's marked readonly in pickle.
# The type: ignore is because mypy can't statically determine the type of this class.
class UnpicklerWrapper(pickle_module.Unpickler): # type: ignore[name-defined]
# from https://stackoverflow.com/questions/13398462/unpickling-python-objects-with-a-changed-module-path/13405732
# Lets us override the imports that pickle uses when unpickling an object.
# This is useful for maintaining BC if we change a module path that tensor instantiation relies on.
def find_class(self, mod_name, name):
if type(name) is str and 'Storage' in name:
try:
return StorageType(name)
except KeyError:
pass
mod_name = load_module_mapping.get(mod_name, mod_name)
return super().find_class(mod_name, name)
# Load the data (which may in turn use `persistent_load` to load tensors)
data_file = io.BytesIO(zip_file.get_record(pickle_file))
unpickler = UnpicklerWrapper(data_file, **pickle_load_args)
unpickler.persistent_load = persistent_load
result = unpickler.load()
torch._utils._validate_loaded_sparse_tensors()
return result
def _is_torchscript_zip(zip_file):
return 'constants.pkl' in zip_file.get_all_records()
|
pytorch-master
|
torch/serialization.py
|
import sys
import traceback
import warnings
from collections import defaultdict
from typing import Any, DefaultDict, List, Optional
import torch
def _type(self, dtype=None, non_blocking=False, **kwargs):
"""Returns the type if `dtype` is not provided, else casts this object to
the specified type.
If this is already of the correct type, no copy is performed and the
original object is returned.
Args:
dtype (type or string): The desired type
non_blocking (bool): If ``True``, and the source is in pinned memory
and destination is on the GPU or vice versa, the copy is performed
asynchronously with respect to the host. Otherwise, the argument
has no effect.
**kwargs: For compatibility, may contain the key ``async`` in place of
the ``non_blocking`` argument. The ``async`` arg is deprecated.
"""
non_blocking = _get_async_or_non_blocking("type", non_blocking, kwargs)
if dtype is None:
return self.__module__ + "." + self.__class__.__name__
if isinstance(dtype, str):
dtype = _import_dotted_name(dtype)
if dtype == type(self):
return self
if self.is_sparse:
if not dtype.is_sparse:
raise RuntimeError("Cannot cast sparse tensor to dense tensor")
new_module_name = dtype.__module__.replace(".sparse", "")
new_values_type_name = new_module_name + "." + dtype.__name__
new_values = torch.Tensor._values(self).type(new_values_type_name, non_blocking)
new_indices_type_name = new_module_name + ".LongTensor"
new_indices = torch.Tensor._indices(self).type(
new_indices_type_name, non_blocking
)
return dtype(new_indices, new_values, self.size())
if dtype.is_sparse:
raise RuntimeError("Cannot cast dense tensor to sparse tensor")
return dtype(self.size()).copy_(self, non_blocking)
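# A minimal sketch of the cast-or-report behaviour implemented above, exercised
# through the public Tensor.type() API (whether every .type() call routes through
# this exact helper is not assumed here; the calls themselves are standard).
def _example_type_casting():
    import torch

    x = torch.zeros(3)                   # float32 tensor
    name = x.type()                      # no dtype given -> returns 'torch.FloatTensor'
    y = x.type('torch.DoubleTensor')     # string form -> cast copy
    z = x.type(torch.DoubleTensor)       # legacy type object -> same result
    return name, y.dtype, z.dtype        # ('torch.FloatTensor', torch.float64, torch.float64)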
def _cuda(self, device=None, non_blocking=False, **kwargs):
"""Returns a copy of this object in CUDA memory.
If this object is already in CUDA memory and on the correct device, then
no copy is performed and the original object is returned.
Args:
device (int): The destination GPU id. Defaults to the current device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host. Otherwise,
the argument has no effect.
**kwargs: For compatibility, may contain the key ``async`` in place of
the ``non_blocking`` argument.
"""
non_blocking = _get_async_or_non_blocking("cuda", non_blocking, kwargs)
if self.is_cuda:
if device is None:
device = torch.cuda.current_device()
if self.get_device() == device:
return self
else:
if device is None:
device = -1
with torch.cuda.device(device):
if self.is_sparse:
new_type = getattr(torch.cuda.sparse, self.__class__.__name__)
indices = torch.Tensor._indices(self).cuda(device, non_blocking)
values = torch.Tensor._values(self).cuda(device, non_blocking)
return new_type(indices, values, self.size())
else:
untyped_storage = torch.UntypedStorage(
self.size(), device=torch.device("cuda")
)
untyped_storage.copy_(self, non_blocking)
return untyped_storage
def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
"""Return the non-blocking flag given the function name and kwargs.
Args:
function_name (str): the name of the function being used.
non_blocking (bool): the default value.
**kwargs (dict): the kwargs passed to the function.
"""
if not kwargs:
return non_blocking
if len(kwargs) != 1 or "async" not in kwargs:
message = "{}() got an unexpected keyword argument '{}'"
argument = list(kwargs.keys()).pop()
raise TypeError(message.format(function_name, argument))
warnings.warn("'async' is deprecated; use 'non_blocking'")
return kwargs["async"]
# Note [Don't serialize hooks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Since time immemorial, we have serialized the backward hooks associated with
# variables. This kind of half-worked--Python can pickle global functions
# (but not closures!)--but there were problems.
#
# - It's fragile. If you serialize a backward hook into a saved
# model, and then you rename the function associated with the hook,
# now your saved model is broken and you can't load it anymore.
#
# - It's not actually used. The standard recommendation is to
# serialize the *state_dict* of a model, not the model itself
# (since this is more stable to code changes affecting the model
# serialization), and the state dict saves "data" only, thus
#   stripping the backward hooks.  In some cases, hooks are
# essential to the well-functioning of a model (e.g., DDP),
#   but DDP already manages re-adding the hooks!
#
# - We didn't serialize them in many cases. Prior to #10220, we
# were dropping backward hooks in ForkingPickler. We "fixed" this
# to be convenient with other serialization sites, but lack of
# serializing backward hooks wasn't actually the root cause of
# the bug.
#
# With these cases in mind, we have decided that a better strategy
# is to just NOT serialize hooks at all.
#
# Since this is a BC-breaking change, we should warn when we previously
# serialized a hook, but no longer do so. This will be done by adding a special
# sentinel property to hooks that will be used to suppress this warning. If a hook
# has the property _torch_serialize_ignore, we will not emit a warning if we
# attempt to serialize a Tensor with this hook attached to it.
#
# By the way, when _backward_hooks is skipped, we must give an EMPTY
# OrderedDict(); if you pass None you'll run afoul of #12219.
# TODO: Once we decide to break serialization FC, `storage` no longer needs to
# be a TypedStorage
def _rebuild_tensor(storage, storage_offset, size, stride):
# first construct a tensor with the correct dtype/device
t = torch.tensor([], dtype=storage.dtype, device=storage.untyped().device)
return t.set_(storage.untyped(), storage_offset, size, stride)
def _rebuild_tensor_v2(
storage, storage_offset, size, stride, requires_grad, backward_hooks
):
tensor = _rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
# NB: This line exists only for backwards compatibility; the
# general expectation is that backward_hooks is an empty
# OrderedDict. See Note [Don't serialize hooks]
tensor._backward_hooks = backward_hooks
return tensor
_sparse_tensors_to_validate: List["torch.Tensor"] = []
# In _legacy_load() in serialization.py we unpickle storages after the sparse
# tensors have been already unpickled. Those storages contain data necessary for
# validating sparse tensors: indices and values. That's why sparse tensors are
# first unpickled without any validation, and then this function is called just
# before _legacy_load() returns, so that all the sparse tensors can be validated
# in bulk.
#
# The same procedure must be followed by _load() in serialization.py because due
# to Pickler semantics, we have to use the same (non-validating) function for
# unpickling sparse tensors, regardless of the caller.
def _validate_loaded_sparse_tensors():
try:
for t in _sparse_tensors_to_validate:
if t.is_sparse:
torch._validate_sparse_coo_tensor_args(
t._indices(), t._values(), t.size()
)
elif t.is_sparse_csr:
# TODO: Validation currently involves an expensive traversal
# on CPU, which may include a device transfer.
torch._validate_sparse_csr_tensor_args(
t.crow_indices(), t.col_indices(), t.values(), t.size()
)
else:
raise NotImplementedError(
"_validate_loaded_sparse_tensors for layout `%s`" % (t.layout)
)
finally:
_sparse_tensors_to_validate.clear()
def _rebuild_sparse_tensor(layout, data):
"""
Rebuilds a sparse tensor from its sparse storage representation.
Args:
layout (str): The sparse storage layout of the tensor.
data (tuple): The tensor's sparse storage representation.
"""
if layout == torch.sparse_coo:
indices, values, size = data
result = torch._sparse_coo_tensor_unsafe(indices, values, size)
_sparse_tensors_to_validate.append(result)
return result
raise NotImplementedError("rebuilding sparse tensor for layout %s" % (layout))
def _rebuild_sparse_csr_tensor(layout, data):
if layout == torch.sparse_csr:
crow_indices, col_indices, values, size = data
result = torch._sparse_csr_tensor_unsafe(
crow_indices, col_indices, values, size
)
_sparse_tensors_to_validate.append(result)
return result
raise NotImplementedError("rebuilding sparse tensor for layout %s" % (layout))
def _rebuild_device_tensor_from_numpy(data, dtype, device, requires_grad):
tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
tensor.requires_grad = requires_grad
return tensor
# Should not be used, only here to be able to load Tensors serialized with older versions of pytorch
_rebuild_xla_tensor = _rebuild_device_tensor_from_numpy
def _rebuild_meta_tensor_no_storage(dtype, size, stride, requires_grad):
return torch.empty_strided(
size, stride, dtype=dtype, device="meta", requires_grad=requires_grad
)
def _rebuild_wrapper_subclass(
cls, dtype, size, stride, storage_offset, layout, device, requires_grad
):
return torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls,
size,
strides=stride,
storage_offset=storage_offset,
layout=layout,
device=device,
requires_grad=requires_grad,
)
# TODO: Once we decide to break serialization FC, `storage` no longer needs to
# be a TypedStorage
def _rebuild_qtensor(
storage,
storage_offset,
size,
stride,
quantizer_params,
requires_grad,
backward_hooks,
):
qscheme = quantizer_params[0]
if qscheme == torch.per_tensor_affine:
_, scale, zero_point = quantizer_params
tensor = torch._empty_affine_quantized(
size,
scale=scale,
zero_point=zero_point,
dtype=storage.dtype,
device=storage.device,
)
elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
_, scales, zero_points, axis = quantizer_params
if type(scales) is list and type(zero_points) is list:
if qscheme == torch.per_channel_affine:
scales = torch.tensor(scales, dtype=torch.double, device=storage.device)
zero_points = torch.tensor(
zero_points, dtype=torch.long, device=storage.device
)
else:
scales = torch.tensor(scales, dtype=torch.float, device=storage.device)
zero_points = torch.tensor(
zero_points, dtype=torch.float, device=storage.device
)
tensor = torch._empty_per_channel_affine_quantized(
size,
scales=scales,
zero_points=zero_points,
axis=axis,
dtype=storage.dtype,
device=storage.device,
)
else:
raise RuntimeError(
"Can't deserialize quantized tensor with qscheme {}".format(qscheme)
)
tensor.set_(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
# NB: This line exists only for backwards compatibility; the
# general expectation is that backward_hooks is an empty
# OrderedDict. See Note [Don't serialize hooks]
tensor._backward_hooks = backward_hooks
return tensor
def _rebuild_parameter(data, requires_grad, backward_hooks):
param = torch.nn.Parameter(data, requires_grad)
# NB: This line exists only for backwards compatibility; the
# general expectation is that backward_hooks is an empty
# OrderedDict. See Note [Don't serialize hooks]
param._backward_hooks = backward_hooks
return param
def _import_dotted_name(name):
components = name.split(".")
obj = __import__(components[0])
for component in components[1:]:
obj = getattr(obj, component)
return obj
# Taken from python 3.5 docs
def _accumulate(iterable, fn=lambda x, y: x + y):
"Return running totals"
# _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
# _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
yield total
for element in it:
total = fn(total, element)
yield total
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Args:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
return torch._C._nn.flatten_dense_tensors(tensors)
def _flatten_sparse_tensors(tensors):
"""Flatten sparse tensors into two contiguous 1D buffers, one of indices and
one of values. Assume tensors are of same sparse type.
Args:
tensors (Iterable[Tensor]): sparse tensors to flatten.
Returns:
A tuple of two contiguous 1D buffers, one containing input tensors'
indices and the other containing the values.
"""
flat_indices = torch._C._nn.flatten_dense_tensors(
[torch.Tensor._indices(t) for t in tensors]
)
flat_values = torch._C._nn.flatten_dense_tensors(
[torch.Tensor._values(t) for t in tensors]
)
return flat_indices, flat_values
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Args:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
return torch._C._nn.unflatten_dense_tensors(flat, tensors)
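# A minimal round-trip sketch for the dense helpers above: the flat buffer is a
# copy, so an elementwise op on it followed by unflattening yields new tensors
# shaped like the originals. The helper name is illustrative only.
def _example_flatten_unflatten_dense():
    import torch

    tensors = [torch.ones(2, 3), torch.arange(4.0)]       # same dense type (float32)
    flat = _flatten_dense_tensors(tensors)                 # 1D buffer with 6 + 4 elements
    flat.mul_(2)                                           # one elementwise op on the whole buffer
    doubled = _unflatten_dense_tensors(flat, tensors)      # views into flat, original shapes
    return [t.shape for t in doubled]                      # [torch.Size([2, 3]), torch.Size([4])]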
def _unflatten_sparse_tensors(flat, tensors):
"""View flat buffer (containing indices and values) using the sizes of
tensors. Assume that tensors are of same sparse type, and that flat is given
by _flatten_sparse_tensors.
Args:
flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
tensors to unflatten.
tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened sparse tensors with sizes same as tensors and values from
flat.
"""
flat_indices, flat_values = flat
indices = torch._C._nn.unflatten_dense_tensors(
flat_indices, [torch.Tensor._indices(t) for t in tensors]
)
values = torch._C._nn.unflatten_dense_tensors(
flat_values, [torch.Tensor._values(t) for t in tensors]
)
outputs = []
for t, i, v in zip(tensors, indices, values):
outputs.append(t.new(i, v, t.size()))
return tuple(outputs)
def _reorder_tensors_as(tensors, ordered_tensors):
"""Assume that tensors are of same order as ordered_tensors within their
types, e.g., from _take_tensors. Reorder them to be of same order as
ordered_tensors.
Args:
tensors (Iterable[Tensor]): tensors to be reordered. They should be of
the same order as ordered_tensors within their own types.
ordered_tensors (Iterable[Tensor]): tensors whose order will be the
reference.
Returns:
Ordered tuple of tensors with contents from tensors and order of
ordered_tensors.
"""
type_dict = defaultdict(list)
for tensor in tensors:
type_dict[tensor.type()].append(tensor)
type_dict_ = {t: iter(coll) for t, coll in type_dict.items()}
return tuple(next(type_dict_[tensor.type()]) for tensor in ordered_tensors)
def _take_tensors(tensors, size_limit):
"""Group tensors into chunks. This generator yields a chunk at each time,
each containing tensors of same type up to certain byte limit in total size.
Args:
tensors (Sequence): A sequence of tensors to be separated into chunks.
size_limit (int): The limit of each chunk in bytes.
Yields:
Blocks of tensors of same type and within size_limit. The yielded
tensors are only ordered as the original sequence within its types.
"""
buf_dict: DefaultDict[str, List] = defaultdict(lambda: [[], 0])
for tensor in tensors:
t = tensor.type()
if tensor.is_sparse:
indices = torch.Tensor._indices(tensor)
values = torch.Tensor._values(tensor)
size = (
indices.numel() * indices.element_size()
+ values.numel() * values.element_size()
)
else:
size = tensor.numel() * tensor.element_size()
buf_and_size = buf_dict[t]
if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
yield buf_and_size[0]
buf_and_size = buf_dict[t] = [[], 0]
buf_and_size[0].append(tensor)
buf_and_size[1] += size
for buf, _ in buf_dict.values():
if len(buf) > 0:
yield buf
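# A minimal sketch of the chunking rule above: three float32 tensors of 4000,
# 4000 and 40 bytes grouped under a 5000-byte limit come out as two chunks.
# The helper name is illustrative only.
def _example_take_tensors():
    import torch

    tensors = [torch.zeros(1000), torch.zeros(1000), torch.zeros(10)]
    chunks = list(_take_tensors(tensors, size_limit=5000))
    # chunks[0] == [tensors[0]]; chunks[1] == [tensors[1], tensors[2]]
    return [len(c) for c in chunks]  # [1, 2]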
# annotation decorator to get annotations in a way that is compatible
# with both Python 2 and 3
def annotate(ret, **kwargs):
def dec(fun):
fun.__annotations__ = dict(kwargs)
fun.__annotations__["return"] = ret
return fun
return dec
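# A minimal sketch of the decorator above: it simply attaches an __annotations__
# dict (the example function name is illustrative only).
@annotate(int, x=int, y=int)
def _example_annotated_add(x, y):
    return x + y
# _example_annotated_add.__annotations__ == {'x': int, 'y': int, 'return': int}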
# NOTE [ Python Traceback Reference Cycle Problem ]
#
# When using sys.exc_info(), it is important to **not** store the exc_info[2],
# which is the traceback, because otherwise you will run into the traceback
# reference cycle problem, i.e., the traceback holding reference to the frame,
# and the frame (which holds reference to all the object in its temporary scope)
# holding reference the traceback.
class KeyErrorMessage(str):
r"""str subclass that returns itself in repr"""
def __repr__(self):
return self
class ExceptionWrapper(object):
r"""Wraps an exception plus traceback to communicate across threads"""
def __init__(self, exc_info=None, where="in background"):
# It is important that we don't store exc_info, see
# NOTE [ Python Traceback Reference Cycle Problem ]
if exc_info is None:
exc_info = sys.exc_info()
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
self.where = where
def reraise(self):
r"""Reraises the wrapped exception in the current thread"""
# Format a message such as: "Caught ValueError in DataLoader worker
# process 2. Original Traceback:", followed by the traceback.
msg = "Caught {} {}.\nOriginal {}".format(
self.exc_type.__name__, self.where, self.exc_msg
)
if self.exc_type == KeyError:
# KeyError calls repr() on its argument (usually a dict key). This
# makes stack traces unreadable. It will not be changed in Python
# (https://bugs.python.org/issue2651), so we work around it.
msg = KeyErrorMessage(msg)
elif getattr(self.exc_type, "message", None):
# Some exceptions have first argument as non-str but explicitly
# have message field
raise self.exc_type(message=msg)
try:
exception = self.exc_type(msg)
except TypeError:
# If the exception takes multiple arguments, don't try to
# instantiate since we don't know how to
raise RuntimeError(msg) from None
raise exception
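# A minimal sketch of the intended use of ExceptionWrapper: a worker thread
# captures the active exception (plus formatted traceback) and the main thread
# re-raises it. The helper name is illustrative only.
def _example_exception_wrapper_round_trip():
    import threading

    box = {}

    def worker():
        try:
            raise ValueError("bad input")
        except Exception:
            # Captures sys.exc_info() of the active exception by default.
            box["error"] = ExceptionWrapper(where="in example worker thread")

    t = threading.Thread(target=worker)
    t.start()
    t.join()
    if "error" in box:
        box["error"].reraise()  # raises ValueError with the original traceback text in the message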
def _get_available_device_type():
if torch.cuda.is_available():
return "cuda"
if hasattr(torch, "xpu") and torch.xpu.is_available(): # type: ignore[attr-defined]
return "xpu"
# add more available device types here
return None
def _get_device_attr(get_member):
device_type = _get_available_device_type()
if device_type and device_type.lower() == "cuda":
return get_member(torch.cuda)
if device_type and device_type.lower() == "xpu":
return get_member(torch.xpu) # type: ignore[attr-defined]
# add more available device types here
return None
def _get_current_device_index():
# current device index
return _get_device_attr(lambda m: m.current_device())
def _get_all_device_indices():
# all device index
return _get_device_attr(lambda m: list(range(m.device_count())))
def _get_devices_properties(device_ids):
# all device properties
return [_get_device_attr(lambda m: m.get_device_properties(i)) for i in device_ids]
def get_current_device_index() -> int:
r"""Checks if there are CUDA devices available and
returns the device index of the current default CUDA device.
Returns -1 in case there are no CUDA devices available.
Arguments: ``None``
"""
if torch.cuda.device_count() > 0:
return torch.cuda.current_device()
return -1
def _get_device_index(
device: Any, optional: bool = False, allow_cpu: bool = False
) -> int:
r"""Gets the device index from :attr:`device`, which can be a torch.device
object, a Python integer, or ``None``.
If :attr:`device` is a torch.device object, returns the device index if it
has index. Note that for a device without a specified index,
i.e., ``torch.device('xxx')``, this will return the current default
device of that type if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
CPU devices will be accepted and ``-1`` will be returned in this case.
If :attr:`device` is a Python integer, it is returned as is.
If :attr:`device` is ``None``, this will return the current default
device of the supported runtime platform if :attr:`optional` is ``True``.
i.e., the current default CUDA device will be returned if CUDA runtime is supported.
"""
if isinstance(device, str):
device = torch.device(device)
device_idx: Optional[int] = None
if isinstance(device, torch.device):
if not allow_cpu and device.type == "cpu":
raise ValueError("Expected a non cpu device, but got: {}".format(device))
device_idx = -1 if device.type == "cpu" else device.index
if isinstance(device, int):
device_idx = device
if device_idx is None:
if optional:
# The eager API _get_current_device_index uses `lambda` functions which are
# not supported in JIT and hence not scriptable. The JIT equivalent API to get
# the current device index is `get_current_device_index()` which can
# be scripted. We use is_scripting to check the mode we are in and call the
# appropriate API.
if torch.jit.is_scripting():
device_idx = get_current_device_index()
else:
device_idx = _get_current_device_index()
else:
raise ValueError(
"Expected a torch.device with a specified index "
"or an integer, but got:{}".format(device)
)
return device_idx
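# A minimal sketch of the accepted inputs for _get_device_index: plain integers
# pass through, torch.device objects yield their index, and CPU devices are only
# accepted with allow_cpu=True (returning -1). The helper name is illustrative.
def _example_get_device_index():
    import torch

    a = _get_device_index(3)                                     # 3
    b = _get_device_index(torch.device("cuda:1"))                # 1
    c = _get_device_index(torch.device("cpu"), allow_cpu=True)   # -1
    return a, b, c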
def _handle_complex(tensor):
"""
    Returns a real view of a tensor if it has a complex dtype, else just the tensor.
    We need to check for UninitializedParameter because otherwise checking is_complex is an error for a LazyModule.
"""
return (
torch.view_as_real(tensor)
if not isinstance(tensor, torch.nn.UninitializedParameter)
and tensor.is_complex()
else tensor
)
def _element_size(dtype):
"""
Returns the element size for a dtype, in bytes
"""
if not isinstance(dtype, torch.dtype):
raise RuntimeError(f"expected torch.dtype, but got {type(dtype)}")
if dtype.is_complex:
return torch.finfo(dtype).bits >> 2
elif dtype.is_floating_point:
return torch.finfo(dtype).bits >> 3
elif dtype == torch.bool:
# NOTE: torch.bool is not supported in torch.iinfo()
return 1
else:
return torch.iinfo(dtype).bits >> 3
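# A minimal sketch of the size rules above: real and integer dtypes report
# bits // 8, bool reports 1 byte, and complex dtypes report the size of the
# full real/imaginary pair. The helper name is illustrative only.
def _example_element_sizes():
    import torch

    return (
        _element_size(torch.float32),    # 4
        _element_size(torch.int64),      # 8
        _element_size(torch.bool),       # 1
        _element_size(torch.complex64),  # 8
    )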
class _ClassPropertyDescriptor:
def __init__(self, fget, fset=None):
self.fget = fget
def __get__(self, instance, owner=None):
if owner is None:
owner = type(instance)
return self.fget.__get__(instance, owner)()
def classproperty(func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return _ClassPropertyDescriptor(func)
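# A minimal sketch of the descriptor above: the decorated function becomes
# readable as an attribute on the class itself, without instantiation.
# The example class name is illustrative only.
class _ExampleConfig:
    _name = "default"

    @classproperty
    def name(cls):
        return cls._name
# _ExampleConfig.name == "default"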
|
pytorch-master
|
torch/_utils.py
|
from copy import deepcopy
from dataclasses import dataclass
from functools import lru_cache
from warnings import warn
import torch
import torch.overrides
from torch._prims_common import getnvFuserDtype, Number
from torch.fx import GraphModule
from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
if torch.cuda.is_available():
from torch._C._nvfuser import ( # type: ignore[import]
DataType,
Fusion,
FusionDefinition,
)
else:
DataType = None
# nvFuserTensorTemplate and nvFuserScalarTemplate are helper objects
# for cached construction of the nvFuser's Fusion
# TODO: change what is stored in the cache for nvFuser's Tensor objects
# https://github.com/pytorch/pytorch/issues/80551
@dataclass(frozen=True)
class nvFuserTensorTemplate:
size: tuple
stride: tuple
dtype: DataType
@dataclass(frozen=True)
class nvFuserScalarTemplate:
dtype: DataType
def to_nvfuser_template_args(args):
def to_nvfuser(arg):
if isinstance(arg, torch.Tensor):
return nvFuserTensorTemplate(
arg.size(), arg.stride(), getnvFuserDtype(arg.dtype)
)
elif isinstance(arg, Number):
return nvFuserScalarTemplate(getnvFuserDtype(type(arg)))
else:
return arg
return tree_map(to_nvfuser, args)
# MyPy bug: https://github.com/python/mypy/issues/5107
@lru_cache(maxsize=1024) # type: ignore[arg-type]
def make_nvfuser_fusion(gm: GraphModule, *nv_args_templates):
# PROTOTYPE nvfuser executor
# Everything in the graph must support nvfuser
for node in gm.graph.nodes:
if (
node.op == "call_function"
and getattr(node.target, "impl_nvfuser", None) is None
):
raise ValueError(
"All call_function nodes in the graph must support nvfuser. "
f"Node {node} with target {node.target} does not support nvfuser"
)
fusion = Fusion()
with FusionDefinition(fusion) as fd:
def _to_nvfuser_constant(arg):
if isinstance(arg, Number):
return fd.define_constant(arg)
else:
return arg
class FusionInterpreter(torch.fx.Interpreter):
def call_function(self, target, args, kwargs):
args = tuple(map(_to_nvfuser_constant, args))
target = target.impl_nvfuser
args = (fd,) + args
return target(*args, **kwargs)
def templates_to_nvfuser_inputs(arg):
if isinstance(arg, nvFuserTensorTemplate):
x = fd.define_tensor(arg.size, arg.stride, arg.dtype)
return x
elif isinstance(arg, nvFuserScalarTemplate):
x = fd.define_scalar(arg.dtype)
return x
else:
return arg
# Transforms graph to call nvfuser lowerings
nv_args = tuple(map(templates_to_nvfuser_inputs, nv_args_templates))
out = FusionInterpreter(gm).run(*nv_args)
flat_out, unflatten_spec = tree_flatten(out)
for o in flat_out:
fd.add_output(o)
return fusion, unflatten_spec
def nvfuser_execute(gm: GraphModule, *args):
if not torch.cuda.is_available():
raise RuntimeError(
"Attempting to use nvFuser trace executor but CUDA is not available!"
)
flat_args, _ = tree_flatten(args)
# Construction of the fusion is expensive and cached based on the GraphModule
# and symbolic nvFuser args.
nv_template_args = to_nvfuser_template_args(flat_args)
fusion, unflatten_spec = make_nvfuser_fusion(gm, *nv_template_args) # type: ignore[misc]
# Inputs to fusion.execute correspond to the same template/symbolic inputs
# marked with `define_tensor/scalar`
concrete_fusion_inputs = tuple(
arg for arg in flat_args if isinstance(arg, (torch.Tensor, Number))
)
return tree_unflatten(
fusion.execute(concrete_fusion_inputs), # type: ignore[has-type]
unflatten_spec, # type: ignore[has-type]
)
class NvfuserPrimOperatorSupport(torch.fx.passes.operator_support.OperatorSupport):
def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
return (
node.op == "call_function"
and getattr(node.target, "impl_nvfuser", None) is not None
)
class PartitionedInterpreter(torch.fx.Interpreter):
def call_module(self, target, args, kwargs):
assert isinstance(target, str)
assert len(kwargs) == 0
submod = self.fetch_attr(target)
# CapabilityBasedPartitioner hardcodes the name of the subgraphs with supported_ops as "fused_" + subgraph id
if target.startswith("fused_"):
return nvfuser_execute(submod, *args)
else:
return super().call_module(target, args, kwargs)
# MyPy bug: https://github.com/python/mypy/issues/5107
@lru_cache() # type: ignore[arg-type]
def maybe_partition_graph(gm: GraphModule):
supported_ops = NvfuserPrimOperatorSupport()
call_function_nodes = filter(lambda n: n.op == "call_function", gm.graph.nodes)
# the graph is partitioned only if at least one node is not supported by nvFuser
any_unsupported = any(
not supported_ops.is_node_supported(None, node) for node in call_function_nodes
)
if any_unsupported:
# CapabilityBasedPartitioner modifies the graph in-place so we need to make a copy of the graph
gm = deepcopy(gm)
partitioner = CapabilityBasedPartitioner(
gm, supported_ops, allows_single_node_partition=True
)
partitions = partitioner.propose_partitions()
if len(partitions) == 0:
warn(
"No partition found for the graph. "
+ "This is likely because the graph is not supported by nvFuser. "
+ "Please use the eager ATen mode to execute the graph.",
category=RuntimeWarning,
)
partitioned_graph = partitioner.fuse_partitions(partitions)
return partitioned_graph, any_unsupported
else:
return gm, any_unsupported
def nvfuser_execute_partitioned(gm: GraphModule, *args):
# When possible it's better to use nvfuser_execute directly
# because it avoids PartitionedInterpreter's overhead
gm, is_partitioned = maybe_partition_graph(gm)
if is_partitioned:
return PartitionedInterpreter(gm).run(*args)
else:
return nvfuser_execute(gm, *args)
|
pytorch-master
|
torch/_prims/nvfuser_executor.py
|
import contextlib
import itertools
import math
import operator
import weakref
from enum import Enum
from functools import partial, reduce
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
import torch
import torch._prims_common as utils
import torch.library
from torch import Tensor, TypedStorage
from torch._C import _get_default_device
from torch._prims.nvfuser_prims import register_nvprims
from torch._prims_common import (
check,
DimsSequenceType,
DimsType,
Number,
NumberType,
ShapeType,
StrideType,
TensorLike,
TensorLikeType,
type_to_dtype,
)
from torch._prims_common.wrappers import backwards_not_supported
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch.overrides import handle_torch_function, has_torch_function
from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
prim = torch.library.Library("prims", "DEF")
prim_impl = torch.library.Library("prims", "IMPL", "CompositeExplicitAutograd")
prim_backend_select_impl = torch.library.Library("prims", "IMPL", "BackendSelect")
prim_autograd_impl = torch.library.Library("prims", "IMPL", "Autograd")
prim_meta_impl = torch.library.Library("prims", "IMPL", "Meta")
# Experimental module containing prototype "primitive" operations.
__all__ = [
#
# Common datastructures and helpers
#
"RETURN_TYPE",
#
# Elementwise unary prims
#
"abs",
"acos",
"acosh",
"asin",
"asinh",
"atan",
"atanh",
"cos",
"cosh",
"bessel_i0",
"bessel_i0e",
"bessel_i1",
"bessel_i1e",
"bitwise_not",
"cbrt",
"ceil",
"conj_physical",
"digamma",
"erf",
"erf_inv",
"erfc",
"exp",
"expm1",
"exp2",
"fill",
"floor",
"imag",
"isfinite",
"lgamma",
"log",
"log1p",
"log2",
"log10",
"neg",
"real",
"reciprocal",
"round",
"sign",
"signbit",
"sin",
"sinh",
"sqrt",
"tan",
"tanh",
"trunc",
#
# Elementwise binary prims
#
"add",
"atan2",
"bitwise_and",
"bitwise_or",
"bitwise_xor",
# 'complex', # needs custom meta
"div",
"eq",
"fmax",
"fmin",
"fmod",
"gcd",
"ge",
"gt",
"hypot",
"igamma",
"igammac",
"le",
"lt",
"maximum",
"minimum",
"mul",
"ne",
"nextafter",
"pow",
"remainder",
"rsqrt",
"shift_left",
"shift_right_arithmetic",
"shift_right_logical", # not implemented
"sub",
"zeta",
#
# View prims
#
"as_strided",
"broadcast_in_dim",
"collapse_view",
"conj",
"expand_dims",
"slice",
"slice_in_dim", # implemented using slice -- make this a ref?
"split_dim",
"squeeze",
"transpose",
"view_of",
#
# Shape prims
#
"collapse",
"cat",
"reshape",
"rev",
#
# Conditional prims
#
"where",
#
# Data conversion and movement prims
#
"convert_element_type",
"device_put",
"item",
"maximum_value",
"minimum_value",
"to_dtype",
#
# Inplace prims
#
"copy_to",
"resize",
# "_set", # Commented out, see note below
#
# Reduction prims
#
"amax",
"amin",
"prod",
"sum",
"var",
#
# Tensor Creation Prims
#
"empty_strided",
"scalar_tensor",
"arange",
#
# Linear algebra (linalg) Prims
#
"svd",
#
# Randomness Prims
#
"uniform",
#
# FFT prims
#
"fft_r2c",
"fft_c2c",
"fft_c2r",
]
# In order to keep things like aliasing relationships and storage
# consistent wrt/meta tensors, FakeTensors own a FakeTensorMode
# which caches conversions to Meta Tensors. We would like to use
# one consistent mode among FakeTensors, which we store here.
# We store a weakref, so that when all previous FakeTensors are collected
# the present mode will also deallocate. FakeTensorMode holds onto
# tensors that are converted to Meta so we don't want to persist it
# longer than necessary.
prim_fake_mode_ref = None
def get_prim_fake_mode():
global prim_fake_mode_ref
if prim_fake_mode_ref is None or prim_fake_mode_ref() is None:
mode = FakeTensorMode()
prim_fake_mode_ref = weakref.ref(mode)
return mode
else:
return prim_fake_mode_ref()
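# Minimal illustrative sketch (not part of the original file): while a strong
# reference to the mode is alive, repeated calls return the same cached
# FakeTensorMode; once every reference is gone the weakref dies and the next
# call constructs a fresh mode.
#
#   mode_a = get_prim_fake_mode()
#   mode_b = get_prim_fake_mode()
#   assert mode_a is mode_b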
def TensorMeta(
tensorlike: Optional[Union[NumberType, torch.Tensor]] = None,
*,
shape: Optional[ShapeType] = None,
strides: Optional[StrideType] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str]] = None,
):
if isinstance(tensorlike, Number):
assert not shape and (shape is None or isinstance(shape, Sequence))
assert not strides and (strides is None or isinstance(strides, Sequence))
inferred_shape: Tuple[int, ...] = ()
inferred_strides: Tuple[int, ...] = ()
inferred_dtype = type_to_dtype(type(tensorlike))
inferred_device = torch.device("cpu")
# TODO: This looks wrong, a number that is wrapped into a tensor
# needs to behave differently than a scalar tensor for type
# promotion purposes
elif tensorlike is not None:
assert isinstance(tensorlike, torch.Tensor)
inferred_shape = tuple(tensorlike.shape)
inferred_strides = tuple(tensorlike.stride())
inferred_dtype = tensorlike.dtype
inferred_device = tensorlike.device
else:
# If no tensorlike "example" is given then all metadata
# must be provided explicitly
assert shape is not None
assert strides is not None
assert dtype is not None
assert device is not None
shape = inferred_shape if shape is None else tuple(shape)
strides = inferred_strides if strides is None else tuple(strides)
dtype = inferred_dtype if dtype is None else dtype
device = inferred_device if device is None else device
if isinstance(device, str):
device = torch.device(device)
if isinstance(tensorlike, FakeTensor):
mode = tensorlike.fake_mode
else:
mode = get_prim_fake_mode()
if device.type == "meta":
return torch.empty_strided(shape, strides, dtype=dtype, device="meta")
else:
        # SymInt doesn't support empty_strided yet
if any(
isinstance(inp, torch.SymIntNode) for inp in itertools.chain(shape, strides)
):
meta_t = torch.empty(shape, dtype=dtype, device="meta")
else:
meta_t = torch.empty_strided(shape, strides, dtype=dtype, device="meta")
return FakeTensor(mode, meta_t, device)
#
# Common datastructures and helpers
#
# Describes the return type of the primitive:
#
# - NEW, a new tensor is created
# - VIEW, a view of an input tensor is returned
# - INPLACE, one or more input tensors are modified
#
# These descriptors are mutually exclusive and exhaustive.
class RETURN_TYPE(Enum):
NEW = (0,)
VIEW = (1,)
INPLACE = (2,)
def _wrap_tensor_meta(f):
def wrap(t):
if (
isinstance(t, torch.Tensor)
and not isinstance(t, FakeTensor)
and not t.device.type == "meta"
):
return FakeTensor.from_tensor(t, get_prim_fake_mode())
else:
return t
def wrapper(*args, **kwargs):
wrapped_args = tree_map(wrap, args)
wrapped_kwargs = tree_map(wrap, kwargs)
return f(*wrapped_args, **wrapped_kwargs)
return wrapper
def _make_prim(
*,
schema: str,
return_type: Union[RETURN_TYPE, Tuple[RETURN_TYPE, ...]],
meta: Callable,
impl_aten: Callable,
doc: str,
):
"""
Creates a primitive operation.
"""
prim.define(schema)
def _prim_impl(*args, **kwargs):
# always run the meta function because aten implementation will
# typically accept more inputs (e.g., it will do promotion and
# broadcasting) which we want to reject
meta(*args, **kwargs)
return impl_aten(*args, **kwargs)
# Right now prims don't support autograd (we can and should add an
# argument that provides an implementation for backward here.) Because we
# don't have derivative formulas, we must setup a custom autograd function
# that raises an error if backwards is invoked
def _autograd_impl(*args, **kwargs):
return backwards_not_supported(_prim)(*args, **kwargs)
_meta_impl = _wrap_tensor_meta(meta)
def _backend_select_impl(*args, **kwargs):
if kwargs.get("device") and kwargs["device"].type == "meta":
return _meta_impl(*args, **kwargs)
else:
return _prim_impl(*args, **kwargs)
name = schema.split("(")[0]
prim_impl.impl(name, _prim_impl)
prim_autograd_impl.impl(name, _autograd_impl)
prim_meta_impl.impl(name, _meta_impl)
_prim_packet = getattr(torch.ops.prims, name)
_prim = _prim_packet.default
from torch._subclasses.fake_tensor import contains_tensor_types
if not any(contains_tensor_types(a.type) for a in _prim._schema.arguments):
prim_backend_select_impl.impl(name, _backend_select_impl)
for p in (_prim_packet, _prim):
p.__doc__ = doc
p.return_type = return_type # type: ignore[attr-defined]
p.schema = schema
p.prim_impl = _prim_impl
p.prim_meta_impl = _wrap_tensor_meta(meta)
return _prim
class ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND(Enum):
DEFAULT = (0,)
ALWAYS_BOOL = (2,)
COMPLEX_TO_FLOAT = (3,)
# TODO: implement dtype validation here, too, or on the corresponding refs
def _elementwise_meta(
*args,
type_promotion: ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND,
    args_with_fixed_dtypes: Optional[Tuple[TensorLikeType, ...]] = None,
) -> FakeTensor:
"""
Meta function for elementwise operations that produce outputs in the same dtype
as their inputs.
Stride logic is currently incorrect.
"""
assert len(args) > 0
utils.check_same_dtype(*args)
args_ = list(args)
if args_with_fixed_dtypes is not None:
args_.extend(args_with_fixed_dtypes)
utils.check_same_device(*args_, allow_cpu_scalar_tensors=True)
utils.check_same_shape(*args_, allow_cpu_scalar_tensors=True)
strides = utils.compute_elementwise_output_strides(*args_)
shape = utils.extract_shape(*args_, allow_cpu_scalar_tensors=True)
# Acquires the dtype
dtype = None
scalar_type = None
for arg in args:
if isinstance(arg, TensorLike):
if not utils.is_cpu_scalar_tensor(arg):
dtype = arg.dtype
break
else:
dtype = arg.dtype
elif isinstance(arg, Number):
scalar_type = type(arg)
if dtype is None and scalar_type is not None:
dtype = utils.type_to_dtype(scalar_type)
# Acquires the device (if it exists) or number
device = None
number = None
for arg in args_:
if isinstance(arg, TensorLike):
device = arg.device
break
elif isinstance(arg, Number):
if number is None:
number = arg
# NOTE: type promotion behavior here is mostly hidden from tests because
# references will typically handle the type promotion properly even if this doesn't
# (but getting it wrong will cause too many casts to be inserted in traces!)
if device is not None:
assert dtype is not None
if type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT:
dtype = dtype
elif type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL:
dtype = torch.bool
elif type_promotion == ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT:
if utils.is_complex_dtype(dtype):
dtype = utils.corresponding_real_dtype(dtype)
else:
dtype = dtype
return TensorMeta(device=device, shape=shape, strides=strides, dtype=dtype)
# Number case
# NOTE: this case is not currently exercised
# TODO: fix number type promotion (bool, complex->float)
return TensorMeta(number)
def _complex_only_elementwise_meta(*args, **kwargs):
utils.check(
utils.is_complex_dtype(args[0].dtype), lambda: "Only complex dtype is supported"
)
return _elementwise_meta(*args, **kwargs)
def _make_elementwise_unary_prim(
name: str, *, type_promotion: ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND, **kwargs
):
"""
Creates an elementwise unary prim.
"""
return _make_prim(
schema=f"{name}(Tensor self) -> Tensor",
meta=partial(_elementwise_meta, type_promotion=type_promotion),
return_type=RETURN_TYPE.NEW,
**kwargs,
)
def _make_elementwise_binary_prim(
name: str, *, type_promotion: ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND, **kwargs
):
"""
Creates an elementwise binary prim.
"""
return _make_prim(
schema=f"{name}(Tensor self, Tensor other) -> Tensor",
meta=partial(_elementwise_meta, type_promotion=type_promotion),
return_type=RETURN_TYPE.NEW,
**kwargs,
)
def _not_impl(*args, **kwargs):
raise NotImplementedError
#
# Elementwise unary operations
#
abs = _make_elementwise_unary_prim(
"abs",
impl_aten=torch.abs,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
)
acos = _make_elementwise_unary_prim(
"acos",
impl_aten=torch.acos,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
acosh = _make_elementwise_unary_prim(
"acosh",
impl_aten=torch.acosh,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
asin = _make_elementwise_unary_prim(
"asin",
impl_aten=torch.asin,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
asinh = _make_elementwise_unary_prim(
"asinh",
impl_aten=torch.asinh,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
atan = _make_elementwise_unary_prim(
"atan",
impl_aten=torch.atan,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
atanh = _make_elementwise_unary_prim(
"atanh",
impl_aten=torch.atanh,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
cos = _make_elementwise_unary_prim(
"cos",
impl_aten=torch.cos,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
cosh = _make_elementwise_unary_prim(
"cosh",
impl_aten=torch.cosh,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
bessel_i0 = _make_elementwise_unary_prim(
"bessel_i0",
impl_aten=torch.i0,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
bessel_i0e = _make_elementwise_unary_prim(
"bessel_i0e",
impl_aten=torch.special.i0e,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
bessel_i1 = _make_elementwise_unary_prim(
"bessel_i1",
impl_aten=torch.special.i1,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
bessel_i1e = _make_elementwise_unary_prim(
"bessel_i1e",
impl_aten=torch.special.i1e,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
bitwise_not = _make_elementwise_unary_prim(
"bitwise_not",
impl_aten=torch.bitwise_not,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
def _cbrt_aten(a: torch.Tensor) -> Tensor:
utils.check(
not a.is_complex(),
lambda: "cbrt: Complex inputs not supported. Consider calling torch.pow(a, 1.0/3.0)",
)
# Returns the real cubic root of the number.
    # Note that if a < 0, pow(a, (1. / 3.)) returns the complex number
# exp(1/3 * log(a)) = exp(1/3 * (log(abs(a)) + pi*i)) = cbrt(abs(a)) * e^{pi/3*i}
# which is a complex number.
# For more info see the section Note in
# https://en.cppreference.com/w/cpp/numeric/math/cbrt
return torch.copysign(torch.pow(a.abs(), 1 / 3), a)
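# Illustrative example (not part of the original file):
#   _cbrt_aten(torch.tensor([-8.0, 27.0])) evaluates to approximately
#   tensor([-2., 3.]), whereas torch.pow(a, 1 / 3) on a negative real input
#   corresponds to the complex principal branch and yields NaN for real tensors.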
cbrt = _make_elementwise_unary_prim(
"cbrt",
impl_aten=_cbrt_aten,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
ceil = _make_elementwise_unary_prim(
"ceil",
impl_aten=torch.ceil,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
def _conj_physical_meta(input: TensorLikeType) -> TensorLikeType:
if not input.dtype.is_complex:
raise RuntimeError("prims.conj_physical is only defined for complex dtypes")
strides = utils.compute_elementwise_output_strides(input)
return TensorMeta(input, strides=strides)
conj_physical = _make_prim(
schema="conj_physical(Tensor self) -> Tensor",
meta=_conj_physical_meta,
impl_aten=torch._conj_physical,
doc="Returns the physical conjugation of a complex tensor",
return_type=RETURN_TYPE.NEW,
)
digamma = _make_elementwise_unary_prim(
"digamma",
impl_aten=torch.digamma,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
erf = _make_elementwise_unary_prim(
"erf",
impl_aten=torch.erf,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
erf_inv = _make_elementwise_unary_prim(
"erf_inv",
impl_aten=torch.special.erfinv,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
erfc = _make_elementwise_unary_prim(
"erfc",
impl_aten=torch.special.erfc,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
exp = _make_elementwise_unary_prim(
"exp",
impl_aten=torch.exp,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
expm1 = _make_elementwise_unary_prim(
"expm1",
impl_aten=torch.special.expm1,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
exp2 = _make_elementwise_unary_prim(
"exp2",
impl_aten=torch.special.exp2,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
def _fill_meta(a: TensorLikeType, value: NumberType) -> TensorLikeType:
return _elementwise_meta(
a, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT
)
# See https://github.com/pytorch/pytorch/issues/77932 for out-of-place fill request
def _fill_aten(a: Tensor, value: NumberType) -> Tensor:
t = a * False
with torch.no_grad():
t.fill_(value) # type: ignore[arg-type]
return t
# NOTE: fill uses _make_prim directly because it has a value parameter
fill = _make_prim(
schema="fill(Tensor self, Scalar value) -> Tensor",
return_type=RETURN_TYPE.NEW,
meta=_fill_meta,
impl_aten=_fill_aten,
doc="",
)
floor = _make_elementwise_unary_prim(
"floor",
impl_aten=torch.floor,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
imag = _make_prim(
schema="imag(Tensor self) -> Tensor",
meta=partial(
_complex_only_elementwise_meta,
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
),
return_type=RETURN_TYPE.VIEW,
impl_aten=torch.imag,
doc="",
)
isfinite = _make_elementwise_unary_prim(
"isfinite",
impl_aten=torch.isfinite,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
)
lgamma = _make_elementwise_unary_prim(
"lgamma",
impl_aten=torch.lgamma,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
log = _make_elementwise_unary_prim(
"log",
impl_aten=torch.log,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
log1p = _make_elementwise_unary_prim(
"log1p",
impl_aten=torch.log1p,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
log2 = _make_elementwise_unary_prim(
"log2",
impl_aten=torch.log2,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
log10 = _make_elementwise_unary_prim(
"log10",
impl_aten=torch.log10,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
real = _make_prim(
schema="real(Tensor self) -> Tensor",
meta=partial(
_complex_only_elementwise_meta,
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
),
return_type=RETURN_TYPE.VIEW,
impl_aten=torch.real,
doc="",
)
reciprocal = _make_elementwise_unary_prim(
"reciprocal",
impl_aten=torch.reciprocal,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
neg = _make_elementwise_unary_prim(
"neg",
impl_aten=torch.neg,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
round = _make_elementwise_unary_prim(
"round",
impl_aten=torch.round,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
rsqrt = _make_elementwise_unary_prim(
"rsqrt",
impl_aten=torch.rsqrt,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
sign = _make_elementwise_unary_prim(
"sign",
impl_aten=torch.sign,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
signbit = _make_elementwise_unary_prim(
"signbit",
impl_aten=torch.signbit,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
sin = _make_elementwise_unary_prim(
"sin",
impl_aten=torch.sin,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
sinh = _make_elementwise_unary_prim(
"sinh",
impl_aten=torch.sinh,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
sqrt = _make_elementwise_unary_prim(
"sqrt",
impl_aten=torch.sqrt,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
tan = _make_elementwise_unary_prim(
"tan",
impl_aten=torch.tan,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
tanh = _make_elementwise_unary_prim(
"tanh",
impl_aten=torch.tanh,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
trunc = _make_elementwise_unary_prim(
"trunc",
impl_aten=torch.trunc,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
#
# Elementwise binary operations
#
add = _make_elementwise_binary_prim(
name="add",
impl_aten=torch.add,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
atan2 = _make_elementwise_binary_prim(
name="atan2",
impl_aten=torch.atan2,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
bitwise_and = _make_elementwise_binary_prim(
"bitwise_and",
impl_aten=torch.bitwise_and,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
bitwise_or = _make_elementwise_binary_prim(
"bitwise_or",
impl_aten=torch.bitwise_or,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
bitwise_xor = _make_elementwise_binary_prim(
"bitwise_xor",
impl_aten=torch.bitwise_xor,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
# TODO: complex needs a special meta to account for its float -> complex behavior
# complex = _make_elementwise_binary_prim(
# impl_aten=torch.complex,
# doc="",
# )
# div prim performs truncation division on integer inputs
# and true division for floating and complex inputs
def _div_aten(a, b):
is_integral = isinstance(a, (bool, int)) or (
isinstance(a, torch.Tensor) and utils.is_integer_dtype(a.dtype)
)
if is_integral:
return torch.div(a, b, rounding_mode="trunc")
else:
return torch.true_divide(a, b)
div = _make_elementwise_binary_prim(
"div",
impl_aten=_div_aten,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
eq = _make_elementwise_binary_prim(
"eq",
impl_aten=torch.eq,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
)
fmax = _make_elementwise_binary_prim(
"fmax",
impl_aten=torch.fmax,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
fmin = _make_elementwise_binary_prim(
"fmin",
impl_aten=torch.fmin,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
fmod = _make_elementwise_binary_prim(
"fmod",
impl_aten=torch.fmod,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
gcd = _make_elementwise_binary_prim(
"gcd",
impl_aten=torch.gcd,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
ge = _make_elementwise_binary_prim(
"ge",
impl_aten=torch.ge,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
)
gt = _make_elementwise_binary_prim(
"gt",
impl_aten=torch.gt,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
)
hypot = _make_elementwise_binary_prim(
"hypot",
impl_aten=torch.hypot,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
igamma = _make_elementwise_binary_prim(
"igamma",
impl_aten=torch.special.gammainc,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
igammac = _make_elementwise_binary_prim(
"igammac",
impl_aten=torch.special.gammaincc,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
le = _make_elementwise_binary_prim(
"le",
impl_aten=torch.le,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
)
lt = _make_elementwise_binary_prim(
"lt",
impl_aten=torch.lt,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
)
# Note: the following impls are because torch.maximum and torch.minimum do not support scalar inputs
def _maximum_aten(
a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]
) -> TensorLikeType:
if isinstance(a, TensorLike) and isinstance(b, Number):
b = scalar_tensor(b, dtype=a.dtype, device=a.device)
elif isinstance(b, TensorLike) and isinstance(a, Number):
a = scalar_tensor(a, dtype=b.dtype, device=b.device)
return torch.maximum(a, b) # type: ignore[arg-type]
maximum = _make_elementwise_binary_prim(
"maximum",
impl_aten=_maximum_aten,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
def _minimum_aten(
a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]
) -> TensorLikeType:
if isinstance(a, TensorLike) and isinstance(b, Number):
b = scalar_tensor(b, dtype=a.dtype, device=a.device)
elif isinstance(b, TensorLike) and isinstance(a, Number):
a = scalar_tensor(a, dtype=b.dtype, device=b.device)
return torch.minimum(a, b) # type: ignore[arg-type]
minimum = _make_elementwise_binary_prim(
"minimum",
impl_aten=_minimum_aten,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
mul = _make_elementwise_binary_prim(
"mul",
impl_aten=torch.mul,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
ne = _make_elementwise_binary_prim(
"ne",
impl_aten=torch.ne,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.ALWAYS_BOOL,
)
nextafter = _make_elementwise_binary_prim(
"nextafter",
impl_aten=torch.nextafter,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
pow = _make_elementwise_binary_prim(
"pow",
impl_aten=torch.pow,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
remainder = _make_elementwise_binary_prim(
"remainder",
impl_aten=torch.remainder,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
shift_left = _make_elementwise_binary_prim(
"shift_left",
impl_aten=torch.bitwise_left_shift,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
shift_right_arithmetic = _make_elementwise_binary_prim(
"shift_right_arithmetic",
impl_aten=torch.bitwise_right_shift,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
shift_right_logical = _not_impl
sub = _make_elementwise_binary_prim(
"sub",
impl_aten=torch.sub,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
zeta = _make_elementwise_binary_prim(
"zeta",
impl_aten=torch.special.zeta,
doc="",
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
)
#
# View operations
#
# TODO: model view relationships
# TODO: model storage
def _as_strided_meta(
a: TensorLikeType, size: ShapeType, stride: StrideType, storage_offset: int
) -> TensorLikeType:
assert len(size) == len(stride)
assert storage_offset >= 0
utils.validate_strides(stride)
utils.validate_shape(size)
    if reduce(operator.mul, size, 1) == 0:
        # NOTE: This special case is to avoid having to acquire the storage below
        # as_strided views of shapes with no elements are trivially valid, so it's OK
pass
elif isinstance(a, torch.Tensor):
utils.check_in_bounds_for_storage(a.storage(), size, stride, storage_offset)
return TensorMeta(a, shape=size, strides=stride)
def _as_strided_aten(
a: Tensor, size: ShapeType, stride: StrideType, storage_offset: int
) -> Tensor:
return torch.as_strided(a, size, stride, storage_offset)
_as_strided_doc = """
Creates a view of the tensor with the given shape (size), strides (stride) and
storage offset (storage_offset).
"""
as_strided = _make_prim(
schema="as_strided(Tensor(a!) a, int[] size, int[] stride, int storage_offset) -> Tensor(a!)",
meta=_as_strided_meta,
impl_aten=_as_strided_aten,
return_type=RETURN_TYPE.VIEW,
doc=_as_strided_doc,
)
def _broadcast_in_dim_meta(
a: TensorLikeType, shape: ShapeType, broadcast_dimensions: Sequence[int]
):
# Type checks
assert isinstance(a, TensorLike)
assert isinstance(shape, Sequence)
assert isinstance(broadcast_dimensions, Sequence)
# every dimension must be accounted for
assert a.ndim == len(broadcast_dimensions)
# broadcast shape must have weakly more dimensions
assert len(shape) >= a.ndim
# broadcast_dimensions must be an ascending sequence
# (no relative reordering of dims) of integers and
# each dimension must be within the new shape
def _greater_than_reduce(acc, x):
assert isinstance(x, int)
assert x > acc
assert x < len(shape)
return x
reduce(lambda acc, x: _greater_than_reduce(acc, x), broadcast_dimensions, -1)
# shape must be broadcastable to
for idx, new_idx in enumerate(broadcast_dimensions):
assert a.shape[idx] == 1 or a.shape[idx] == shape[new_idx]
new_strides = []
original_idx = 0
for idx in range(len(shape)):
if idx in broadcast_dimensions:
# Assigns a stride of zero to dimensions
# which were actually broadcast
if a.shape[original_idx] != shape[idx]:
new_strides.append(0)
else:
new_strides.append(a.stride()[original_idx])
original_idx = original_idx + 1
else:
new_strides.append(0)
return TensorMeta(a, shape=shape, strides=new_strides)
def _broadcast_in_dim_aten(a, shape, broadcast_dimensions):
s = list(shape)
for broadcast_dimension in broadcast_dimensions:
s[broadcast_dimension] = -1
v = a
for idx, x in enumerate(s):
if x != -1:
v = v.unsqueeze(idx)
return v.expand(shape)
_broadcast_in_dim_doc = """
Creates a view of a with the specified shape.
Allows adding dimensions of any length and broadcasting
dimensions of length one in a to any length.
The location of the broadcast dimensions must be specified
using the broadcast_dimensions argument. Changing the
relative order of dimensions is not supported.
"""
broadcast_in_dim = _make_prim(
schema="broadcast_in_dim(Tensor(a) a, SymInt[] shape, int[] broadcast_dimensions) -> Tensor(a)",
meta=_broadcast_in_dim_meta,
impl_aten=_broadcast_in_dim_aten,
return_type=RETURN_TYPE.VIEW,
doc=_broadcast_in_dim_doc,
)
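# Illustrative example (not part of the original file): a (3, 1) tensor can be
# viewed as (2, 3, 4) by mapping its dimensions to positions 1 and 2 of the
# target shape; position 0 is a new broadcast dimension and the length-1
# dimension is broadcast to length 4.
#
#   a = torch.randn(3, 1)
#   v = broadcast_in_dim(a, (2, 3, 4), (1, 2))   # view of shape (2, 3, 4)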
def _collapse_view_helper(
a: TensorLikeType, start: int, end: int
) -> Tuple[Optional[ShapeType], Optional[StrideType]]:
assert isinstance(a, TensorLike)
# Special-case for zero dimensional tensors
if a.ndim == 0:
shape = (1,)
strides = (1,)
else:
shape = a.shape # type: ignore[assignment]
strides = a.stride()
utils.validate_idx(len(shape), start)
utils.validate_exclusive_idx(len(shape), end)
# Verifies end is strictly greater than start
# (Collapse requires a non-empty interval)
if end <= start:
msg = "Attempting to collapse but end, {0}, is less than or equal to start, {1}!".format(
end, start
)
raise ValueError(msg)
if a.ndim == 0 or (end - 1 == start):
return shape, strides
length = shape[end - 1]
stride = strides[end - 1]
for idx in reversed(range(start, end - 1)):
if shape[idx] == 0 or shape[idx + 1] == 0:
length = 0
stride = 0
break
if shape[idx] == 1:
continue
length = length * shape[idx]
stride = min(stride, strides[idx])
if (
a.numel() > 0
and shape[idx + 1] != 1
and not (strides[idx] == strides[idx + 1] * shape[idx + 1])
):
return None, None
new_shape = shape[:start] + (length,) + shape[end:]
new_strides = strides[:start] + (stride,) + strides[end:]
# NOTE: when the input has no elements it's restrided as if it were contiguous
if a.numel() == 0:
new_strides = utils.make_contiguous_strides_for(new_shape)
return new_shape, new_strides
def _collapse_view_meta(a: TensorLikeType, start: int, end: int) -> TensorLikeType:
new_shape, new_strides = _collapse_view_helper(a, start, end)
if new_shape is None:
msg = "Attempting to view a collapsed tensor, but no such view exists!"
raise ValueError(msg)
return TensorMeta(a, shape=new_shape, strides=new_strides)
def _collapse_view_aten(a: Tensor, start: int, end: int) -> Tensor:
# Special-cases zero-dim tensors
if a.ndim == 0:
shape = (1,)
else:
shape = a.shape # type: ignore[assignment]
dim_length = 1
for idx in range(start, end):
dim_length = dim_length * shape[idx]
new_shape = shape[0:start] + (dim_length,) + shape[end:]
return a.view(new_shape)
_collapse_view_doc = """
Creates a view of a with the dimensions between
start (inclusive) and end (exclusive) merged into a
single dimension.
If it's not possible to take such a view then an error
is thrown. See collapse instead.
The dimensions can be merged if and only if
they are all "nested" with each other. That is, they all
have the property that
stride[i] = stride[i+1] * shape[i+1]
for all i in [start, end - 1).
"""
collapse_view = _make_prim(
schema="collapse_view(Tensor(a) a, int start, int end) -> Tensor(a)",
meta=_collapse_view_meta,
impl_aten=_collapse_view_aten,
return_type=RETURN_TYPE.VIEW,
doc=_collapse_view_doc,
)
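# Illustrative example (not part of the original file): a contiguous (2, 3, 4)
# tensor has strides (12, 4, 1), which satisfy stride[i] == stride[i+1] *
# shape[i+1], so dimensions 0..2 can be merged into a single length-24 view.
# A (0, 1)-transposed tensor generally cannot, and collapse_view raises.
#
#   a = torch.randn(2, 3, 4)
#   v = collapse_view(a, 0, 3)                  # view of shape (24,)
#   # collapse_view(a.transpose(0, 1), 0, 3)    # would raise ValueError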
def _conj_meta(a: TensorLikeType) -> TensorLikeType:
if not a.dtype.is_complex:
raise RuntimeError("Expected complex dtype in prims.conj")
return TensorMeta(a)
_conj_doc = """
Returns a conjugated view of the original tensor
"""
conj = _make_prim(
schema="conj(Tensor(a) a) -> Tensor(a)",
meta=_conj_meta,
impl_aten=torch.conj,
return_type=RETURN_TYPE.VIEW,
doc=_conj_doc,
)
def expand_dims(a: TensorLikeType, dimensions: DimsSequenceType) -> TensorLikeType:
"""
Creates a view of a with a.ndim + len(dimensions) dimensions, with new
dimensions of length one at the dimensions specified by dimensions.
"""
dims = sorted(utils.canonicalize_dims(a.ndim, dimensions)) # type: ignore[arg-type]
if len(set(dims)) != len(dims):
msg = "Received duplicate dimensions to expand in {0}".format(str(dimensions))
raise ValueError(msg)
new_shape = list(a.shape)
for idx in dims:
new_shape.insert(idx, 1)
broadcast_dimensions = [
idx for idx in range(len(new_shape)) if idx not in dimensions
]
return broadcast_in_dim(a, new_shape, broadcast_dimensions)
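# Illustrative example (not part of the original file): for a (2, 3) tensor,
# expand_dims(a, (1,)) returns a (2, 1, 3) view, equivalent to a.unsqueeze(1).
#
#   a = torch.randn(2, 3)
#   v = expand_dims(a, (1,))   # view of shape (2, 1, 3)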
# Note: saves the Python slice object because we're about to clobber its name with the slice prim
pyslice: Type[slice] = slice # type: ignore[has-type]
def _slice_meta(
a: TensorLikeType,
start_indices: DimsSequenceType,
limit_indices: DimsSequenceType,
strides: Optional[StrideType] = None,
) -> TensorLikeType:
_strides = strides if strides is not None else [1] * len(start_indices)
if a.ndim != len(start_indices):
msg = "Attempting to slice tensor of rank {0} with start_indices of length {1}!".format(
a.ndim, len(start_indices)
)
raise ValueError(msg)
if a.ndim != len(limit_indices):
msg = "Attempting to slice tensor of rank {0} with limit_indices of length {1}!".format(
a.ndim, len(limit_indices)
)
raise ValueError(msg)
if a.ndim != len(_strides):
msg = (
"Attempting to slice tensor of rank {0} with strides of length {1}!".format(
                a.ndim, len(_strides)
)
)
raise ValueError(msg)
for x, y in zip(start_indices, a.shape):
if x < 0:
msg = "Attempting to slice a tensor with a negative start index of {0}!".format(
x
)
raise ValueError(msg)
if x > y:
msg = (
"Attempting to slice a tensor but a start index in {0} is greater than"
" the length of its corresponding dimension in shape {1}".format(
start_indices, a.shape
)
)
raise ValueError(msg)
for x, y, z in zip(limit_indices, a.shape, start_indices):
if x < 0:
msg = "Attempting to slice a tensor with a negative stop index of {0}!".format(
x
)
raise ValueError(msg)
if x > y:
msg = (
"Attempting to slice a tensor but a stop index in {0} is greater than the length of "
" its corresponding dimension in shape {1}".format(
limit_indices, a.shape
)
)
raise ValueError(msg)
        if x < z:
            msg = (
                "Attempting to slice a tensor but a stop index {0} is less than "
                "its corresponding start index {1}!".format(x, z)
            )
            raise ValueError(msg)
for x in _strides:
if x <= 0:
msg = (
"Attempting to slice a tensor with a non-positive step of {0}!".format(
x
)
)
raise ValueError(msg)
new_shape = []
for x, y, z in zip(start_indices, limit_indices, _strides):
new_shape.append(math.floor((y - x) / z))
new_strides = []
for x, y in zip(a.stride(), _strides):
new_strides.append(x * y)
return TensorMeta(a, shape=new_shape, strides=new_strides)
def _slice_aten(
a: Tensor,
start_indices: DimsSequenceType,
limit_indices: DimsSequenceType,
strides: Optional[StrideType] = None,
) -> Tensor:
_strides = strides if strides is not None else [1] * len(start_indices)
slices = []
for start, stop, step in zip(start_indices, limit_indices, _strides):
slices.append(pyslice(start, stop, step))
return operator.getitem(a, slices) # type: ignore[call-overload]
_slice_doc = """
Creates a view of a "bounding box" within the tensor.
The bounding box is specified independently in each of the tensor's dimensions.
start_indices and limit_indices describe the box's boundaries for their corresponding
dimensions. If strides is specified then they specify the step size between elements
in their corresponding dimension.
This operation is analogous to slicing in NumPy, but does not permit slices where
the stop indices are less than the start indices.
"""
slice = _make_prim(
schema="slice(Tensor(a) a, int[] start_indices, int[] limit_indices, int[]? strides=None) -> Tensor(a)",
meta=_slice_meta,
impl_aten=_slice_aten,
return_type=RETURN_TYPE.VIEW,
doc=_slice_doc,
)
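# Illustrative example (not part of the original file): slicing a contiguous
# (4, 6) tensor with start indices (1, 0), limit indices (3, 6), and strides
# (1, 2) produces a (2, 3) view containing the same elements as a[1:3, 0:6:2].
#
#   a = torch.randn(4, 6)
#   v = slice(a, (1, 0), (3, 6), (1, 2))   # view of shape (2, 3)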
def _slice_in_dim_meta(
a: TensorLikeType,
start_index: int,
limit_index: int,
stride: int = 1,
axis: int = 0,
) -> TensorLikeType:
if axis < 0:
msg = "slice_in_dim: received a negative axis {0}".format(axis)
raise ValueError(msg)
if axis >= a.ndim:
msg = "slice_in_dim: axis {0} is greater or equal to the rank {1} of the tensor".format(
axis, a.ndim
)
raise ValueError(msg)
if start_index < 0:
msg = "slice_in_dim: received a negative start_index {0}".format(start_index)
raise ValueError(msg)
if start_index > a.shape[axis]:
msg = "slice_in_dim: start_index is greater than the length {0} of dimension {1}".format(
start_index, axis
)
raise ValueError(msg)
if limit_index > a.shape[axis]:
msg = "slice_in_dim: limit_index is greater than the length {0} of dimension {1}".format(
limit_index, axis
)
raise ValueError(msg)
if limit_index < start_index:
msg = "slice_in_dim: received a limit_index {0} less than the start_index {1}".format(
limit_index, start_index
)
raise ValueError(msg)
    if stride < 1:
msg = "slice_in_dim: received a non-positive stride of {0}!".format(stride)
raise ValueError(msg)
start_indices = [0] * a.ndim
limit_indices = list(a.shape)
strides = [1] * a.ndim
start_indices[axis] = start_index
limit_indices[axis] = limit_index
strides[axis] = stride
return _slice_meta(a, start_indices, limit_indices, strides)
def _slice_in_dim_aten(
a: Tensor,
start_index: int,
limit_index: int,
stride: int = 1,
axis: int = 0,
) -> Tensor:
start_indices = [0] * a.ndim
limit_indices = list(a.shape)
strides = [1] * a.ndim
start_indices[axis] = start_index
limit_indices[axis] = limit_index
strides[axis] = stride
return slice(a, start_indices, limit_indices, strides)
_slice_in_dim_doc = """
Convenience wrapper for slicing just one dimension using slice.
"""
slice_in_dim = _make_prim(
schema="slice_in_dim(Tensor(a) a, int start_index, int limit_index, int stride=1, int axis=0) -> Tensor(a)",
meta=_slice_in_dim_meta,
impl_aten=_slice_in_dim_aten,
return_type=RETURN_TYPE.VIEW,
doc=_slice_in_dim_doc,
)
def _split_dim_meta(a: TensorLikeType, dim: int, outer_length: int) -> TensorLikeType:
assert isinstance(a, TensorLike)
utils.validate_idx(a.ndim, dim)
utils.validate_dim_length(outer_length)
# Verifies the dim can be split with the specified lhs_length
_inner_length = a.shape[dim] / outer_length
inner_length: int = int(_inner_length)
if inner_length != _inner_length:
msg = "Attempting to split dimension of length {0}, but outer length of {1} divides it with a remainder!".format(
a.shape[dim], outer_length
)
raise ValueError(msg)
new_shape: List[int] = []
new_strides: List[int] = []
for idx in range(a.ndim):
if idx == dim:
new_shape.extend((outer_length, inner_length))
new_strides.extend((a.stride()[idx] * inner_length, a.stride()[idx]))
else:
new_shape.append(a.shape[idx])
new_strides.append(a.stride()[idx])
return TensorMeta(a, shape=new_shape, strides=new_strides)
def _split_dim_aten(a: Tensor, dim: int, outer_length: int) -> Tensor:
inner_length = int(a.shape[dim] / outer_length)
new_shape = a.shape[0:dim] + (outer_length, inner_length) + a.shape[dim + 1 :]
return a.view(new_shape)
_split_dim_doc = """
Creates a view of a with the given dimension (of length l) split
into two dimensions, with the outer of the two having
length outer_length and the inner of the two having computed
length inner_length such that outer_length * inner_length = l.
"""
# TODO: consider renaming split_dim_view
split_dim = _make_prim(
schema="split_dim(Tensor(a) a, int dim, int outer_length) -> Tensor(a)",
meta=_split_dim_meta,
impl_aten=_split_dim_aten,
return_type=RETURN_TYPE.VIEW,
doc=_split_dim_doc,
)
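# Illustrative example (not part of the original file): splitting dimension 1
# of a (2, 6) tensor with outer_length=3 yields a (2, 3, 2) view, where the
# inner length is computed as 6 // 3 = 2.
#
#   a = torch.randn(2, 6)
#   v = split_dim(a, 1, 3)   # view of shape (2, 3, 2)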
# Note: allows dimensions to be specified redundantly
def _squeeze_meta(a: TensorLikeType, dimensions: Sequence) -> TensorLikeType:
assert isinstance(a, TensorLike)
for idx in dimensions:
utils.validate_idx(a.ndim, idx)
assert a.shape[idx] == 1
new_shape = []
new_strides = []
for idx in range(len(a.shape)):
if idx in dimensions:
continue
new_shape.append(a.shape[idx])
new_strides.append(a.stride()[idx])
return TensorMeta(a, shape=new_shape, strides=new_strides)
def _squeeze_aten(a: Tensor, dimensions: Sequence) -> Tensor:
for idx in reversed(sorted(dimensions)):
a = torch.squeeze(a, dim=idx)
return a
_squeeze_doc = """
Creates a view of the tensor with the specified dimensions removed.
The removed dimensions must each have length one.
"""
squeeze = _make_prim(
schema="squeeze(Tensor(a) a, int[] dimensions) -> Tensor(a)",
meta=_squeeze_meta,
impl_aten=_squeeze_aten,
return_type=RETURN_TYPE.VIEW,
doc=_squeeze_doc,
)
def _transpose_meta(a: TensorLikeType, permutation: DimsSequenceType) -> TensorLikeType:
if a.ndim != len(permutation):
msg = "Attempting to permute a tensor of rank {0}, but received a permutation of length {1}!".format(
a.ndim, len(permutation)
)
raise ValueError(msg)
if not utils.is_valid_permutation(a.ndim, permutation):
msg = "Received an invalid permutation, {0}!".format(permutation)
raise ValueError(msg)
new_shape = [0] * a.ndim
new_strides = [0] * a.ndim
for idx, dim in enumerate(permutation):
new_shape[idx] = a.shape[dim]
new_strides[idx] = a.stride()[dim]
return TensorMeta(a, shape=tuple(new_shape), strides=tuple(new_strides))
def _transpose_aten(a: Tensor, permutation: DimsSequenceType) -> Tensor:
return torch.permute(a, permutation)
_transpose_doc = """
Creates a view of the tensor with its dimensions permuted.
The length of the permutation must be the rank of the tensor,
and each element of the permutation specifies the new order
for the corresponding dimension.
"""
transpose = _make_prim(
schema="transpose(Tensor(a) a, int[] permutation) -> Tensor(a)",
meta=_transpose_meta,
impl_aten=_transpose_aten,
return_type=RETURN_TYPE.VIEW,
doc=_transpose_doc,
)
def _view_of_meta(a: TensorLikeType) -> TensorLikeType:
return TensorMeta(a)
def _view_of_aten(a: Tensor) -> Tensor:
return a.view(a.shape)
_view_of_doc = """
Creates a view of the tensor.
"""
view_of = _make_prim(
schema="view_of(Tensor(a) a) -> Tensor",
meta=_view_of_meta,
impl_aten=_view_of_aten,
return_type=RETURN_TYPE.VIEW,
doc=_view_of_doc,
)
#
# Shape operations
#
def collapse(a: Tensor, start: int, end: int) -> Tensor:
"""
Wrapper around reshape that collapses a span of dimensions.
See collapse_view for the corresponding view operation.
"""
dim_length = 1
for idx in range(start, end):
dim_length = dim_length * a.shape[idx]
new_shape = a.shape[0:start] + (dim_length,) + a.shape[end:]
return reshape(a, new_shape)
# TODO: review stride logic
def _cat_meta(tensors: Sequence[TensorLikeType], dim: int) -> TensorLikeType:
# Verifies same shape (except in the concat dimension)
shape = tensors[0].shape
concat_length = 0
for tensor in tensors:
for idx, (common_length, length) in enumerate(zip(shape, tensor.shape)):
if idx == dim:
concat_length = concat_length + length
else:
assert length == common_length
    new_shape = list(tensors[0].shape)
new_shape[dim] = concat_length
return TensorMeta(
tensors[0],
shape=new_shape,
strides=utils.make_contiguous_strides_for(new_shape),
)
def _cat_aten(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: int) -> Tensor:
return torch.cat(tensors, dim)
_cat_doc = """
Concatenates tensors along the specified dimension.
The tensors must have the same rank, and every dimension other than the concatenation dimension must have the same length.
"""
cat = _make_prim(
schema="cat(Tensor[] tensors, int dim) -> Tensor",
meta=_cat_meta,
impl_aten=_cat_aten,
return_type=RETURN_TYPE.NEW,
doc=_cat_doc,
)
def _reshape_meta(a: TensorLikeType, shape: ShapeType):
assert isinstance(a, TensorLike)
utils.validate_shape(shape)
# Validates the tensor and the requested shape have the
# same number of elements
    numel = reduce(operator.mul, shape, 1)
if numel != a.numel():
msg = "Attempting to reshape a tensor with {0} elements to a shape with {1} elements!".format(
a.numel(), numel
)
raise ValueError(msg)
return TensorMeta(a, shape=shape, strides=utils.make_contiguous_strides_for(shape))
def _reshape_aten(a: Tensor, shape: ShapeType) -> Tensor:
return a.reshape(shape).contiguous().clone()
_reshape_doc = """
Creates a contiguous tensor with the specified shape
containing a copy of the data in a.
"""
reshape = _make_prim(
schema="reshape(Tensor a, int[] shape) -> Tensor",
meta=_reshape_meta,
impl_aten=_reshape_aten,
return_type=RETURN_TYPE.NEW,
doc=_reshape_doc,
)
def _rev_meta(a: TensorLikeType, dims: DimsSequenceType) -> TensorLikeType:
utils.validate_dimension_indices(a.ndim, dims)
return TensorMeta(a)
_rev_doc = """
Reverses the order of elements along the given dimensions.
"""
rev = _make_prim(
schema="rev(Tensor a, int[] dims) -> Tensor",
meta=_rev_meta,
impl_aten=torch.flip,
return_type=RETURN_TYPE.NEW,
doc=_rev_doc,
)
#
# Conditional prims
#
def _where_meta(
pred: TensorLikeType, a: TensorLikeType, b: TensorLikeType
) -> TensorLikeType:
return _elementwise_meta(
a,
b,
type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
args_with_fixed_dtypes=(pred,),
)
_where_doc = """
Selects elements from a and b according to pred.
Where pred is true the result contains the element from a, and
where pred is false the result contains the element from b.
"""
where = _make_prim(
schema="where(Tensor pred, Tensor a, Tensor b) -> Tensor",
meta=_where_meta,
impl_aten=torch.where,
return_type=RETURN_TYPE.NEW,
doc=_where_doc,
)
#
# Type conversions
#
def _convert_element_type_meta(a: TensorLikeType, dtype: torch.dtype) -> TensorLikeType:
# Type checks
assert isinstance(a, TensorLike)
assert isinstance(dtype, torch.dtype)
strides = utils.compute_elementwise_output_strides(a)
return TensorMeta(a, strides=strides, dtype=dtype)
def _convert_element_type_aten(a: Tensor, dtype: torch.dtype) -> Tensor:
# Propagates requires grad when possible
if not utils.is_grad_dtype(dtype):
requires_grad = False
else:
# TODO: update meta objects so this can be acquired directly
try:
requires_grad = a.requires_grad
        except Exception:
requires_grad = False
result = torch.empty_like(
a, device=a.device, dtype=dtype, requires_grad=requires_grad
)
with torch.no_grad():
return copy_to(result, a)
_convert_element_type_doc = """
Creates a copy of a tensor with the given dtype.
"""
convert_element_type = _make_prim(
schema="convert_element_type(Tensor a, ScalarType dtype) -> Tensor",
meta=_convert_element_type_meta,
impl_aten=_convert_element_type_aten,
return_type=RETURN_TYPE.NEW,
doc=_convert_element_type_doc,
)
def _device_put_meta(
a: TensorLikeType, device: Union[str, torch.device]
) -> TensorLikeType:
assert isinstance(a, TensorLike)
assert isinstance(device, (str, torch.device))
return TensorMeta(a, device=utils.canonicalize_device(device))
def _device_put_aten(a: Tensor, device: Union[str, torch.device]) -> Tensor:
return a.to(device)
_device_put_doc = """
Creates a copy of a tensor on the given device.
"""
device_put = _make_prim(
schema="device_put(Tensor a, Device device) -> Tensor",
meta=_device_put_meta,
impl_aten=_device_put_aten,
return_type=RETURN_TYPE.NEW,
doc=_device_put_doc,
)
# NOTE: need to model meta scalars
# See https://github.com/pytorch/pytorch/issues/78070
def _item_meta(a: TensorLikeType) -> FakeTensor:
number_type = utils.dtype_to_type(a.dtype)
return TensorMeta(number_type(-1))
_item_doc = """
Converts a tensor with one element to a Python number.
"""
# TODO: create a new return type for scalars?
# FIXME: currently returns integers for boolean tensors
# https://github.com/pytorch/pytorch/issues/78071
item = _make_prim(
schema="item(Tensor a) -> Scalar",
meta=_item_meta,
impl_aten=torch.Tensor.item,
return_type=RETURN_TYPE.NEW,
doc=_item_doc,
)
# NOTE: need to model meta scalars
# See https://github.com/pytorch/pytorch/issues/78070
def _maximum_value_meta(dtype: torch.dtype) -> FakeTensor:
number_type = utils.dtype_to_type(dtype)
return TensorMeta(number_type(-1))
def _maximum_value_aten(dtype: torch.dtype):
if dtype == torch.bool:
return True
elif dtype.is_complex or dtype.is_floating_point:
return torch.finfo(dtype).max
else:
return torch.iinfo(dtype).max
_maximum_value_doc = """
Return the maximum finite value for a dtype.
"""
# TODO: create a new return type for scalars?
# FIXME: currently returns integers for boolean tensors
# https://github.com/pytorch/pytorch/issues/78071
maximum_value = _make_prim(
schema="maximum_value(ScalarType dtype) -> Scalar",
meta=_maximum_value_meta,
impl_aten=_maximum_value_aten,
return_type=RETURN_TYPE.NEW,
doc=_maximum_value_doc,
)
# NOTE: need to model meta scalars
# See https://github.com/pytorch/pytorch/issues/78070
def _minimum_value_meta(dtype: torch.dtype) -> FakeTensor:
number_type = utils.dtype_to_type(dtype)
return TensorMeta(number_type(-1))
def _minimum_value_aten(dtype: torch.dtype):
if dtype == torch.bool:
return False
elif dtype.is_complex or dtype.is_floating_point:
return torch.finfo(dtype).min
else:
return torch.iinfo(dtype).min
_minimum_value_doc = """
Return the minimum finite value for a dtype.
"""
# TODO: create a new return type for scalars?
# FIXME: currently returns integers for boolean tensors
# https://github.com/pytorch/pytorch/issues/78071
minimum_value = _make_prim(
schema="minium_value(ScalarType dtype) -> Scalar",
meta=_minimum_value_meta,
impl_aten=_minimum_value_aten,
return_type=RETURN_TYPE.NEW,
doc=_minimum_value_doc,
)
# TODO: FIXME: strides are incorrect
def _to_dtype_meta(a: TensorLikeType, dtype: torch.dtype) -> TensorLikeType:
strides = utils.make_contiguous_strides_for(a.shape)
return TensorMeta(a, strides=strides, dtype=dtype)
def _to_dtype_aten(a: Tensor, dtype: torch.dtype) -> Tensor:
return a.to(dtype)
_to_dtype_doc = """
Creates a contiguous copy of a tensor with the given dtype.
"""
to_dtype = _make_prim(
schema=("to_dtype(Tensor a, ScalarType dtype) -> Tensor"),
meta=_to_dtype_meta,
impl_aten=_to_dtype_aten,
return_type=RETURN_TYPE.NEW,
doc=_to_dtype_doc,
)
#
# Inplace operators
#
def _copy_to_meta(a: TensorLikeType, b: TensorLikeType):
assert isinstance(a, TensorLike)
assert isinstance(b, TensorLike)
# Validates the cast is safe
# TODO: move this as an option on the reference
# a_typ = utils.dtype_to_type(a.dtype)
# b_typ = utils.dtype_to_type(b.dtype)
# if a_typ is not utils.get_higher_type(a_typ, b_typ):
# raise RuntimeError(str(b.dtype), " can't be cast safely to ", str(a.dtype), "!")
# Validates the tensors have the same number of elements
if a.numel() != b.numel():
msg = "Attempting to copy {0} elements to a tensor with {1} elements!".format(
b.numel(), a.numel()
)
raise RuntimeError(msg)
return a
def _copy_to_aten(a: Tensor, b: Tensor) -> Tensor:
return a.copy_(b)
_copy_to_doc = """
Copies the data in b to a and returns the modified a.
"""
# TODO: Remove safe casting and implement on reference instead
copy_to = _make_prim(
schema="copy_to(Tensor(a!) a, Tensor b) -> Tensor(a!)",
meta=_copy_to_meta,
impl_aten=_copy_to_aten,
return_type=RETURN_TYPE.INPLACE,
doc=_copy_to_doc,
)
def _resize_meta(a: TensorLikeType, shape: ShapeType):
return a.resize_(shape)
def _resize_aten(a: Tensor, shape: ShapeType) -> Tensor:
return a.resize_(shape)
_resize_doc = """
Gives a tensor with no elements a new shape, returning the modified tensor.
The tensor's strides are contiguous and its values are uninitialized.
"""
# TODO: review support arbitrary resizes
resize = _make_prim(
schema="resize(Tensor(a!) a, int[] shape) -> Tensor(a!)",
meta=_resize_meta,
impl_aten=_resize_aten,
return_type=RETURN_TYPE.INPLACE,
doc=_resize_doc,
)
def _reduction_meta(inp, dims, *, output_dtype=None):
"""
Meta function for single output reduction operations
Stride logic is incorrect
"""
assert isinstance(inp, TensorLike)
if output_dtype is None:
output_dtype = inp.dtype
output_shape = utils.compute_reduction_output_shape(inp.shape, dims)
return TensorMeta(
shape=output_shape,
strides=utils.make_contiguous_strides_for(output_shape),
dtype=output_dtype,
device=inp.device,
)
def _var_reduction_meta(inp, dims, *, correction):
if utils.is_complex_dtype(inp.dtype):
output_dtype = utils.corresponding_real_dtype(inp.dtype)
else:
output_dtype = inp.dtype
return _reduction_meta(inp, dims, output_dtype=output_dtype)
_sum_doc = """
Computes the sum of elements in the input tensor over the list of dimensions
specified in the dim argument
"""
_prod_doc = """
Computes the product of elements in the input tensor over the list of dimensions
specified in the dim argument
"""
_amax_doc = """
Computes the maximum value of elements in the input tensor over the list of dimensions
specified in the dim argument
"""
_amin_doc = """
Computes the minimum value of elements in the input tensor over the list of dimensions
specified in the dim argument
"""
_var_doc = """
Computes the biased variance of x over the list of dimensions specified in the dim argument
"""
def _make_reduction_prim(name: str, impl_aten, doc):
"""Creates a reduction prim."""
return _make_prim(
schema=f"{name}(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor",
meta=_reduction_meta,
impl_aten=impl_aten,
return_type=RETURN_TYPE.NEW,
doc=doc,
)
def _make_var_reduction_prim(name: str, impl_aten, doc):
"""Creates a reduction prim."""
return _make_prim(
schema=f"{name}(Tensor inp, int[]? dims, *, int correction, ScalarType? output_dtype=None) -> Tensor",
meta=_var_reduction_meta,
impl_aten=impl_aten,
return_type=RETURN_TYPE.NEW,
doc=doc,
)
sum = _make_reduction_prim(
name="sum",
impl_aten=torch.sum,
doc=_sum_doc,
)
def _prod_aten(
inp: TensorLikeType,
dims: Optional[DimsSequenceType],
*,
dtype: Optional[torch.dtype] = None,
) -> Tensor:
if dims is not None:
for d in sorted(dims, reverse=True):
assert d >= 0
inp = torch.prod(inp, d, dtype=dtype)
return inp
else:
return torch.prod(inp, dims, dtype=dtype)
prod = _make_reduction_prim(
name="prod",
impl_aten=_prod_aten,
doc=_prod_doc,
)
var = _make_var_reduction_prim(
name="var",
impl_aten=torch.var,
doc=_var_doc,
)
amax = _make_reduction_prim(
name="amax",
impl_aten=torch.amax,
doc=_amax_doc,
)
amin = _make_reduction_prim(
name="amin",
impl_aten=torch.amin,
doc=_amin_doc,
)
_arange_doc = """
Constructs a 1-D tensor with values from the interval [start, end) taken
with common difference `step` beginning from `start`.
"""
# TODO: layout, pin_memory, memory_format
# TODO: model requires_grad on TensorMeta
def _arange_meta(
start: NumberType,
end: NumberType,
step: NumberType,
*,
dtype: Optional[torch.dtype],
device: Optional[torch.device],
requires_grad: bool,
) -> TensorLikeType:
assert not (
isinstance(start, complex)
and isinstance(end, complex)
and isinstance(step, complex)
)
utils.check(
step != 0,
lambda: "step must be nonzero",
)
utils.check(
math.isfinite(start) and math.isfinite(end),
lambda: f"unsupported range: {start} -> {end}",
)
utils.check(
(step > 0 and end >= start) or (step < 0 and end <= start),
lambda: "upper bound and lower bound inconsistent with step sign",
)
if dtype is not None:
pass
elif all(isinstance(arg, int) for arg in (start, end, step)):
dtype = torch.int64
else:
dtype = torch.get_default_dtype()
device = _get_default_device() if device is None else device
shape = (math.ceil((end - start) / step),)
strides = utils.make_contiguous_strides_for(shape)
return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=device)
def _arange_aten(
start: NumberType,
end: NumberType,
step: NumberType,
*,
dtype: Optional[torch.dtype],
device: Optional[torch.device],
requires_grad: bool,
) -> TensorLikeType:
# mypy: Not all union combinations were tried because there are too many unions
return torch.arange( # type: ignore[call-overload, misc]
start, # type: ignore[arg-type]
end, # type: ignore[arg-type]
step, # type: ignore[arg-type]
dtype=dtype,
device=device,
layout=torch.strided,
pin_memory=False,
requires_grad=requires_grad,
)
# TODO: maybe prims should not have requires_grad arg
# see: https://github.com/pytorch/pytorch/pull/77542/files#r873943255
arange = _make_prim(
schema="arange(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype, Device? device, bool requires_grad) -> Tensor", # noqa: B950
return_type=RETURN_TYPE.NEW,
meta=_arange_meta,
impl_aten=_arange_aten,
doc=_arange_doc,
)
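# Illustrative example (not part of the original file): the number of elements
# is ceil((end - start) / step), so arange(0, 5, 2, ...) has
# ceil(5 / 2) = 3 elements: 0, 2, 4. Unlike torch.arange, the prim takes all
# keyword arguments explicitly.
#
#   t = arange(0, 5, 2, dtype=None, device=None, requires_grad=False)
#   # tensor([0, 2, 4])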
# TODO: layout, pin_memory, memory_format
# TODO: model requires_grad on TensorMeta
def _empty_meta(
shape: ShapeType, *, dtype: torch.dtype, device: torch.device, requires_grad: bool
) -> TensorLikeType:
strides = utils.make_contiguous_strides_for(shape)
return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=device)
def _empty_aten(
shape: ShapeType, *, dtype: torch.dtype, device: torch.device, requires_grad: bool
) -> Tensor:
return torch.empty(shape, dtype=dtype, device=device, requires_grad=requires_grad)
_empty_doc = """
Creates a tensor with uninitialized values and the specified shape, dtype, and device.
"""
empty = _make_prim(
schema="empty(int[] shape, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor",
meta=_empty_meta,
impl_aten=_empty_aten,
return_type=RETURN_TYPE.NEW,
doc=_empty_doc,
)
def _empty_strided_meta(
shape: ShapeType,
strides: StrideType,
*,
dtype: torch.dtype,
device: torch.device,
requires_grad: bool,
) -> TensorLikeType:
return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=device)
_empty_strided_doc = """
Creates a tensor with uninitialized values.
"""
# TODO: add layout, pin_memory
empty_strided = _make_prim(
schema="empty_strided(int[] shape, int[] strides, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor",
return_type=RETURN_TYPE.NEW,
meta=_empty_strided_meta,
impl_aten=torch.empty_strided,
doc=_empty_strided_doc,
)
def _full_meta(
shape: ShapeType,
fill_value: NumberType,
*,
dtype: torch.dtype,
device: torch.device,
requires_grad: bool,
) -> TensorLikeType:
strides = utils.make_contiguous_strides_for(shape)
return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=device)
def _full_aten(
shape: ShapeType,
fill_value: NumberType,
*,
dtype: torch.dtype,
device: torch.device,
requires_grad: bool,
) -> Tensor:
# Note that Mypy thinks torch.full can't accept a complex fill_value
return torch.full(
shape, fill_value, dtype=dtype, device=device, requires_grad=requires_grad # type: ignore[arg-type]
)
_full_doc = """
Creates a tensor filled with the given fill value, and with the specified shape, dtype, and device.
"""
# TODO: add layout
full = _make_prim(
schema="full(int[] shape, Scalar fill_value, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor",
meta=_full_meta,
impl_aten=_full_aten,
return_type=RETURN_TYPE.NEW,
doc=_full_doc,
)
def _full_like_meta(
a: TensorLikeType,
fill_value: NumberType,
*,
dtype: torch.dtype,
device: torch.device,
requires_grad: bool,
) -> TensorLikeType:
strides = utils.compute_elementwise_output_strides(a)
if a.numel() == 0:
strides = a.stride()
return TensorMeta(a, strides=strides, dtype=dtype, device=device)
def _full_like_aten(
a: Tensor,
fill_value: NumberType,
*,
dtype: torch.dtype,
device: torch.device,
requires_grad: bool,
) -> Tensor:
# Note that Mypy thinks torch.full can't accept a complex fill_value
return torch.full_like(
a, fill_value, dtype=dtype, device=device, requires_grad=requires_grad # type: ignore[arg-type]
)
_full_like_doc = """
Creates a tensor filled with the given fill value, and with the same shape, dtype, and device
as the given tensor by default. The dtype and device can be overridden by
specifying them explicitly.
"""
full_like = _make_prim(
schema="full_like(Tensor a, Scalar fill_value, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor",
meta=_full_like_meta,
impl_aten=_full_like_aten,
return_type=RETURN_TYPE.NEW,
doc=_full_like_doc,
)
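# Illustrative sketch (not part of the original module): by default the output
# mirrors the input's metadata, but dtype and device can be overridden, as the doc
# above states. The helper name is hypothetical.
def _example_full_like_override(a: Tensor) -> Tensor:
    out = torch.full_like(a, 0.5, dtype=torch.float64)
    assert out.shape == a.shape and out.dtype == torch.float64
    return out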
def _scalar_tensor_meta(
scalar: NumberType,
*,
dtype: torch.dtype,
device: torch.device,
) -> TensorLikeType:
shape: ShapeType = []
strides = utils.make_contiguous_strides_for(shape)
return TensorMeta(scalar, shape=shape, strides=strides, dtype=dtype, device=device)
def _scalar_tensor_aten(
scalar: NumberType,
*,
dtype: torch.dtype,
device: torch.device,
) -> Tensor:
if isinstance(scalar, complex) and (
dtype is None or not utils.is_complex_dtype(dtype)
):
raise TypeError("Complex scalar requires complex tensor dtype.")
# Note that Mypy thinks torch.scalar can't accept a complex scalar
return torch.scalar_tensor(scalar, dtype=dtype, device=device) # type: ignore[arg-type]
_scalar_tensor_doc = """
Wraps a Number into a Tensor with the specified dtype and device.
"""
# TODO: add layout and pin_memory support
scalar_tensor = _make_prim(
schema="scalar_tensor(Scalar s, *, ScalarType? dtype=None, Device? device=None) -> Tensor",
meta=_scalar_tensor_meta,
impl_aten=_scalar_tensor_aten,
return_type=RETURN_TYPE.NEW,
doc=_scalar_tensor_doc,
)
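# Illustrative sketch (not part of the original module): the empty shape used by the
# meta function above means the result is a 0-dimensional tensor. The hypothetical
# helper below goes through the same torch.scalar_tensor used as impl_aten.
def _example_scalar_tensor() -> Tensor:
    t = torch.scalar_tensor(2.5, dtype=torch.float32, device="cpu")
    assert t.ndim == 0 and t.item() == 2.5
    return t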
#
# Linear algebra (linalg) prims
#
def _svd_meta(
A: TensorLikeType, *, full_matrices: bool
) -> Tuple[TensorLikeType, TensorLikeType, TensorLikeType]:
utils.check_is_matrix(A, "linalg.svd")
utils.check_fp_or_complex(A.dtype, "linalg.svd", allow_low_precision_dtypes=False)
A_shape = A.shape
batch = A_shape[:-2]
m, n = A_shape[-2:]
k = min(m, n)
shape_U = batch + (m, m if full_matrices else k)
strides_U = utils.make_contiguous_strides_for(shape_U, row_major=False)
U = TensorMeta(shape=shape_U, strides=strides_U, dtype=A.dtype, device=A.device)
shape_S = batch + (k,)
strides_S = utils.make_contiguous_strides_for(shape_S)
S = TensorMeta(
shape=shape_S,
strides=strides_S,
dtype=utils.corresponding_real_dtype(A.dtype) if A.is_complex() else A.dtype,
device=A.device,
)
shape_Vh = batch + (n if full_matrices else k, n)
# The CPU backend returns V, but the cuSolver backend returns V^H
# TODO The MAGMA backend returns V, so this is wrong if used with the MAGMA backend
is_cuda = A.device.type == "cuda"
strides_Vh = utils.make_contiguous_strides_for(shape_Vh, row_major=is_cuda)
Vh = TensorMeta(shape=shape_Vh, strides=strides_Vh, dtype=A.dtype, device=A.device)
return U, S, Vh
def _svd_aten(
A: TensorLikeType, *, full_matrices: bool
) -> Tuple[Tensor, Tensor, Tensor]:
return torch.linalg.svd(A, full_matrices=full_matrices)
_svd_doc = """
Returns the SVD of a matrix or batch of matrices.
The `full_matrices` flag controls whether the full or the reduced SVD is returned.
"""
svd = _make_prim(
schema="svd(Tensor A, *, bool full_matrices) -> (Tensor U, Tensor S, Tensor Vh)",
meta=_svd_meta,
impl_aten=_svd_aten,
return_type=(RETURN_TYPE.NEW, RETURN_TYPE.NEW, RETURN_TYPE.NEW),
doc=_svd_doc,
)
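# Illustrative sketch (not part of the original module): the shapes computed by the
# meta function above agree with eager torch.linalg.svd, e.g. for the reduced
# factorization of an (m, n) matrix. The helper name is hypothetical.
def _example_svd_shapes(m: int = 4, n: int = 3) -> None:
    A = torch.randn(m, n)
    U, S, Vh = torch.linalg.svd(A, full_matrices=False)
    k = min(m, n)
    assert U.shape == (m, k) and S.shape == (k,) and Vh.shape == (k, n)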
#
# Randomness Prims
#
def _uniform_meta(
shape: ShapeType,
*,
low: float,
high: float,
dtype: torch.dtype,
device: torch.device,
) -> TensorLikeType:
strides = utils.make_contiguous_strides_for(shape)
return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=device)
def _uniform_aten(
shape: ShapeType,
*,
low: float,
high: float,
dtype: torch.dtype,
device: torch.device,
) -> Tensor:
a = torch.empty(shape, dtype=dtype, device=device)
a.uniform_(low, high)
return a
_uniform_doc = """
Constructs a tensor filled with values drawn uniformly at random from the interval [low, high).
"""
# TODO: we should more seriously review randomness modeling and prims
uniform = _make_prim(
schema=(
"uniform(int[] shape, *, Scalar low, Scalar high, ScalarType dtype, Device device) -> Tensor"
),
return_type=RETURN_TYPE.NEW,
meta=_uniform_meta,
impl_aten=_uniform_aten,
doc=_uniform_doc,
)
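# Illustrative sketch (not part of the original module): the aten implementation
# above is the usual empty-then-uniform_ pattern, so sampled values should lie in
# [low, high) per the Tensor.uniform_ documentation. Hypothetical helper name.
def _example_uniform_bounds() -> bool:
    t = _uniform_aten(
        (1000,), low=-2.0, high=3.0, dtype=torch.float32, device=torch.device("cpu")
    )
    return bool(((t >= -2.0) & (t < 3.0)).all())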
def _fft_r2c_meta(
input: TensorLike,
*,
dim: DimsSequenceType,
onesided: bool,
) -> TensorLikeType:
dim = utils.canonicalize_dims(input.ndim, dim)
utils.validate_no_repeating_dims(dim)
shape = list(input.shape)
if onesided:
last_dim = dim[-1]
shape[last_dim] = shape[last_dim] // 2 + 1
dtype = utils.corresponding_complex_dtype(input.dtype)
strides = utils.make_contiguous_strides_for(shape)
return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=input.device)
def _fft_r2c_aten(
input: TensorLike,
*,
dim: DimsSequenceType,
onesided: bool,
) -> TensorLikeType:
normalization = 0 # No normalization
return torch._fft_r2c(input, dim, normalization, onesided)
_fft_r2c_doc = """
Performs a real-to-complex Fast Fourier Transform.
"""
fft_r2c = _make_prim(
schema="fft_r2c(Tensor self, *, int[] dim, bool onesided) -> Tensor",
meta=_fft_r2c_meta,
impl_aten=_fft_r2c_aten,
return_type=RETURN_TYPE.NEW,
doc=_fft_r2c_doc,
)
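# Illustrative sketch (not part of the original module): with onesided=True the meta
# function above shrinks the transformed dimension to n // 2 + 1, the same size the
# public torch.fft.rfft produces. Hypothetical helper name.
def _example_onesided_length(n: int = 8) -> bool:
    x = torch.randn(n)
    return torch.fft.rfft(x).shape[-1] == n // 2 + 1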
def _fft_c2c_meta(
input: TensorLike,
*,
dim: DimsSequenceType,
forward: bool,
) -> TensorLikeType:
dim = utils.canonicalize_dims(input.ndim, dim)
utils.validate_no_repeating_dims(dim)
shape = input.shape
strides = utils.make_contiguous_strides_for(shape)
return TensorMeta(
shape=shape, strides=strides, dtype=input.dtype, device=input.device
)
def _fft_c2c_aten(
input: TensorLike,
*,
dim: DimsSequenceType,
forward: bool,
) -> TensorLikeType:
normalization = 0 # No normalization
return torch._fft_c2c(input, dim, normalization, forward)
_fft_c2c_doc = """
Performs either a Fast Fourier Transform or its inverse.
"""
fft_c2c = _make_prim(
schema="fft_c2c(Tensor self, *, int[] dim, bool forward) -> Tensor",
meta=_fft_c2c_meta,
impl_aten=_fft_c2c_aten,
return_type=RETURN_TYPE.NEW,
doc=_fft_c2c_doc,
)
def _fft_c2r_meta(
input: TensorLike,
*,
dim: DimsSequenceType,
last_dim_size: int,
) -> TensorLikeType:
dim = utils.canonicalize_dims(input.ndim, dim)
utils.validate_no_repeating_dims(dim)
shape = list(input.shape)
shape[dim[-1]] = last_dim_size
dtype = utils.corresponding_real_dtype(input.dtype)
strides = utils.make_contiguous_strides_for(shape)
return TensorMeta(shape=shape, strides=strides, dtype=dtype, device=input.device)
def _fft_c2r_aten(
input: TensorLike,
*,
dim: DimsSequenceType,
last_dim_size: int,
) -> TensorLikeType:
normalization = 0 # No normalization
return torch._fft_c2r(input, dim, normalization, last_dim_size)
_fft_c2r_doc = """
Performs a complex-to-real inverse Fast Fourier Transform.
"""
fft_c2r = _make_prim(
schema="fft_c2r(Tensor self, *, int[] dim, int last_dim_size) -> Tensor",
meta=_fft_c2r_meta,
impl_aten=_fft_c2r_aten,
return_type=RETURN_TYPE.NEW,
doc=_fft_c2r_doc,
)
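# Illustrative sketch (not part of the original module): last_dim_size plays the role
# of the `n` argument of the public torch.fft.irfft, restoring the real signal length
# that a one-sided spectrum cannot encode on its own. Hypothetical helper name.
def _example_c2r_roundtrip(n: int = 9) -> bool:
    x = torch.randn(n)
    return torch.fft.irfft(torch.fft.rfft(x), n=n).shape[-1] == n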
register_nvprims()
|
pytorch-master
|
torch/_prims/__init__.py
|
import functools
from contextlib import nullcontext
from typing import Any, Callable, Dict, Optional, Sequence
import torch
import torch._prims
import torch._refs
import torch._refs.nn
import torch._refs.nn.functional
import torch._refs.special
import torch.overrides
from torch._prims_common import torch_function_passthrough
from torch.fx.experimental.proxy_tensor import get_isolated_graphmodule
@functools.lru_cache(None)
def torch_to_refs_map():
"""
Mapping of torch API functions to torch._refs functions.
E.g. torch_to_refs_map()[torch.add] == torch._refs.add
"""
modules = [
(torch, torch._refs),
(torch.nn, torch._refs.nn),
(torch.nn.functional, torch._refs.nn.functional),
(torch.special, torch._refs.special),
(torch.fft, torch._refs.fft),
(torch.linalg, torch._refs.linalg),
]
r: Dict[Any, Any] = {
torch.Tensor.__invert__: torch._refs.bitwise_not,
torch.Tensor.__xor__: torch._refs.bitwise_xor,
torch.Tensor.__and__: torch._refs.bitwise_and,
torch.Tensor.__or__: torch._refs.bitwise_or,
torch.Tensor.__eq__: torch._refs.eq,
torch.Tensor.new_empty: torch._refs.new_empty,
torch.Tensor.new_full: torch._refs.new_full,
torch.Tensor.new_zeros: torch._refs.new_zeros,
torch.Tensor.new_ones: torch._refs.new_ones,
torch.Tensor.fill_: torch._refs.fill_,
torch.Tensor.zero_: torch._refs.zero_,
# TODO: Should these methods be mapped some other way?
torch.Tensor.copy_: torch._prims.copy_to,
torch.Tensor.resize: torch._prims.resize,
}
for mod_torch, mod_refs in modules:
for s in mod_refs.__all__: # type: ignore[attr-defined]
r[mod_torch.__dict__.get(s)] = mod_refs.__dict__.get(s)
# Support remapping torch.Tensor.foo to _refs.foo
for s in dir(torch.Tensor):
if s in torch._refs.__all__:
r[getattr(torch.Tensor, s)] = torch._refs.__dict__.get(s)
return r
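# Illustrative sketch (not part of the original module): the cached map can be queried
# directly, e.g. to confirm which ref backs torch.add. Hypothetical helper name.
def _example_refs_lookup() -> bool:
    return torch_to_refs_map().get(torch.add) is torch._refs.add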
@functools.lru_cache(None)
def all_prims():
"""
Set of all prim functions, e.g., torch._prims.add in all_prims()
"""
return {torch._prims.__dict__.get(s) for s in torch._prims.__all__}
class NvfuserPrimsMode(torch.overrides.TorchFunctionMode):
"""
Switches the interpretation of torch.ops.prims.* functions to
use nvFuser's prims in torch.ops.nvprims.*
>>> with NvfuserPrimsMode():
... torch.ops.prims.add(x, y) # calls torch.ops.nvprims.add(x, y)
By default, this context manager will fall back on the torch.ops.prims.* call if the
corresponding nvprim does not exist.
"""
def __torch_function__(
self,
orig_func: Callable,
types: Sequence,
args: Sequence[Any] = (),
        kwargs: Optional[Dict] = None,
):
if kwargs is None:
kwargs = {}
        if isinstance(orig_func, (torch._ops.OpOverload, torch._ops.OpOverloadPacket)):
namespace = str(orig_func).split(".")[0]
name = str(orig_func).split(".")[1]
if namespace == "prims":
nvfunc = getattr(torch.ops.nvprims, name, None)
if nvfunc is not None:
return nvfunc(*args, **kwargs)
return orig_func(*args, **kwargs)
class TorchRefsMode(torch.overrides.TorchFunctionMode):
"""
Switches the interpretation of torch.* functions and Tensor methods to
use PrimTorch refs in torch._refs. (Direct calls to _refs are unaffected.)
>>> # xdoctest: +SKIP
>>> with TorchRefsMode():
... torch.add(x, y) # calls torch._refs.add(x, y)
By default, this context manager will fall back on the torch.* call if the
ref does not exist; set strict=True to raise an error instead.
Even when a ref exists, it can still be desirable to fall back on the torch.*
implementation in some cases; this behavior can be customized by passing a
function to should_fallback_fn.
"""
def __init__(
self,
strict=False,
should_fallback_fn=lambda *_: False,
prims_mode_cls=nullcontext,
):
self.strict = strict
self.should_fallback_fn = should_fallback_fn
self.prims_mode_cls = prims_mode_cls
def __torch_function__(
self,
orig_func: Callable,
types: Sequence,
args: Sequence[Any] = (),
        kwargs: Optional[Dict] = None,
):
if kwargs is None:
kwargs = {}
# For primitive operations, run them as is without interception
# Unless we are in prims_mode, in which case we want to use nvprims
if orig_func in torch_function_passthrough or orig_func in all_prims():
with self.prims_mode_cls():
return orig_func(*args, **kwargs)
mapping = torch_to_refs_map()
func = mapping.get(orig_func, None)
if func is not None:
# If the ref exists query whether we should use it or not
if self.should_fallback_fn(self, func, args, kwargs):
return orig_func(*args, **kwargs)
# torch calls inside func should be interpreted as refs calls
with torch.overrides.enable_torch_function_mode(self, replace=self.inner):
return func(*args, **kwargs)
if self.strict:
raise RuntimeError(
f"no _refs support for {torch.overrides.resolve_name(orig_func)}"
)
return orig_func(*args, **kwargs)
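# Illustrative sketch (not part of the original module): under the mode defined above,
# plain torch.* calls are rerouted to their torch._refs counterparts, mirroring the
# docstring example. Hypothetical helper name.
def _example_torch_refs_mode() -> torch.Tensor:
    x, y = torch.randn(3), torch.randn(3)
    with TorchRefsMode():
        return torch.add(x, y)  # dispatched to torch._refs.add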
def _is_node_supported_nvfuser(node):
return (
node.op == "call_function"
and getattr(node.target, "impl_nvfuser", None) is not None
)
def _is_func_unsupported_nvfuser(torch_function_mode, func, args, kwargs):
with torch.overrides.enable_torch_function_mode(
torch_function_mode, replace=torch_function_mode.inner
):
gm = get_isolated_graphmodule(func, args, kwargs)
call_function_nodes = filter(lambda n: n.op == "call_function", gm.graph.nodes)
any_unsupported = any(
not _is_node_supported_nvfuser(node) for node in call_function_nodes
)
return any_unsupported
TorchRefsNvfuserCapabilityMode = functools.partial(
TorchRefsMode,
should_fallback_fn=_is_func_unsupported_nvfuser,
prims_mode_cls=NvfuserPrimsMode,
)
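# Illustrative sketch (not part of the original module): the partial above behaves like
# TorchRefsMode but, via _is_func_unsupported_nvfuser, only keeps a ref translation when
# every call in the traced graph has an nvFuser prim implementation; otherwise it falls
# back to the original torch.* call. Hypothetical helper name.
def _example_nvfuser_capability_mode() -> torch.Tensor:
    x = torch.randn(4)
    with TorchRefsNvfuserCapabilityMode():
        return torch.sigmoid(x)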
|
pytorch-master
|
torch/_prims/context.py
|